The first two I've been meaning to do for a long time. The 3rd is an (optional) follow-up to a pretty late 4.13 change. The next two were suggested by Andrew to slightly increase the number of IRQs we could handle in total, seeing that IRQ vectors are a relatively scarce resource. The last one is a result of me noticing, while doing the earlier ones, pointless repeated re-building of, in particular, the relatively slow-to-build insn emulator (which should be unconcerned with IRQ vector arrangement adjustments). 1: IRQ: move do_IRQ() 2: IRQ: move and rename __do_IRQ_guest() 3: IRQ: simplify pending EOI stack logic for internally used IRQs 4: IRQ: flip legacy and dynamic vector ranges 5: IRQ: re-use legacy vector ranges on APs 6: move and rename NR_VECTORS Jan _______________________________________________ Xen-devel mailing list Xen-devel@lists.xenproject.org https://lists.xenproject.org/mailman/listinfo/xen-devel
This is to avoid forward declarations of static functions. Beyond the
actual code movement this does
- u8 -> uint8_t,
- convert to Xen style,
- drop unnecessary parentheses and the like,
- strip trailing white space.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -437,9 +437,6 @@ int __init init_irq_data(void)
return 0;
}
-static void __do_IRQ_guest(int vector);
-static void flush_ready_eoi(void);
-
static void ack_none(struct irq_desc *desc)
{
ack_bad_irq(desc->irq);
@@ -897,145 +894,6 @@ void alloc_direct_apic_vector(
spin_unlock(&lock);
}
-void do_IRQ(struct cpu_user_regs *regs)
-{
- struct irqaction *action;
- uint32_t tsc_in;
- struct irq_desc *desc;
- unsigned int vector = (u8)regs->entry_vector;
- int irq = this_cpu(vector_irq)[vector];
- struct cpu_user_regs *old_regs = set_irq_regs(regs);
-
- perfc_incr(irqs);
- this_cpu(irq_count)++;
- irq_enter();
-
- if (irq < 0) {
- if (direct_apic_vector[vector] != NULL) {
- (*direct_apic_vector[vector])(regs);
- } else {
- const char *kind = ", LAPIC";
-
- if ( apic_isr_read(vector) )
- ack_APIC_irq();
- else
- kind = "";
- if ( ! ( vector >= FIRST_LEGACY_VECTOR &&
- vector <= LAST_LEGACY_VECTOR &&
- bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR) ) )
- {
- printk("CPU%u: No irq handler for vector %02x (IRQ %d%s)\n",
- smp_processor_id(), vector, irq, kind);
- desc = irq_to_desc(~irq);
- if ( ~irq < nr_irqs && irq_desc_initialized(desc) )
- {
- spin_lock(&desc->lock);
- printk("IRQ%d a=%04lx[%04lx,%04lx] v=%02x[%02x] t=%s s=%08x\n",
- ~irq, *cpumask_bits(desc->affinity),
- *cpumask_bits(desc->arch.cpu_mask),
- *cpumask_bits(desc->arch.old_cpu_mask),
- desc->arch.vector, desc->arch.old_vector,
- desc->handler->typename, desc->status);
- spin_unlock(&desc->lock);
- }
- }
- TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
- }
- goto out_no_unlock;
- }
-
- desc = irq_to_desc(irq);
-
- spin_lock(&desc->lock);
- desc->handler->ack(desc);
-
- if ( likely(desc->status & IRQ_GUEST) )
- {
- if ( irq_ratelimit_timer.function && /* irq rate limiting enabled? */
- unlikely(desc->rl_cnt++ >= irq_ratelimit_threshold) )
- {
- s_time_t now = NOW();
- if ( now < (desc->rl_quantum_start + MILLISECS(10)) )
- {
- desc->handler->disable(desc);
- /*
- * If handler->disable doesn't actually mask the interrupt, a
- * disabled irq still can fire. This check also avoids possible
- * deadlocks if ratelimit_timer_fn runs at the same time.
- */
- if ( likely(list_empty(&desc->rl_link)) )
- {
- spin_lock(&irq_ratelimit_lock);
- if ( list_empty(&irq_ratelimit_list) )
- set_timer(&irq_ratelimit_timer, now + MILLISECS(10));
- list_add(&desc->rl_link, &irq_ratelimit_list);
- spin_unlock(&irq_ratelimit_lock);
- }
- goto out;
- }
- desc->rl_cnt = 0;
- desc->rl_quantum_start = now;
- }
-
- tsc_in = tb_init_done ? get_cycles() : 0;
- __do_IRQ_guest(irq);
- TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
- goto out_no_end;
- }
-
- desc->status &= ~IRQ_REPLAY;
- desc->status |= IRQ_PENDING;
-
- /*
- * Since we set PENDING, if another processor is handling a different
- * instance of this same irq, the other processor will take care of it.
- */
- if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
- goto out;
-
- desc->status |= IRQ_INPROGRESS;
-
- action = desc->action;
- while ( desc->status & IRQ_PENDING )
- {
- desc->status &= ~IRQ_PENDING;
- spin_unlock_irq(&desc->lock);
- tsc_in = tb_init_done ? get_cycles() : 0;
- action->handler(irq, action->dev_id, regs);
- TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
- spin_lock_irq(&desc->lock);
- }
-
- desc->status &= ~IRQ_INPROGRESS;
-
- out:
- if ( desc->handler->end )
- {
- /*
- * If higher priority vectors still have their EOIs pending, we may
- * not issue an EOI here, as this would EOI the highest priority one.
- */
- if ( cpu_has_pending_apic_eoi() )
- {
- this_cpu(check_eoi_deferral) = true;
- desc->handler->end(desc, vector);
- this_cpu(check_eoi_deferral) = false;
-
- spin_unlock(&desc->lock);
- flush_ready_eoi();
- goto out_no_unlock;
- }
-
- desc->handler->end(desc, vector);
- }
-
- out_no_end:
- spin_unlock(&desc->lock);
- out_no_unlock:
- irq_exit();
- set_irq_regs(old_regs);
-}
-
static void irq_ratelimit_timer_fn(void *data)
{
struct irq_desc *desc, *tmp;
@@ -2012,6 +1870,150 @@ static bool pirq_guest_force_unbind(stru
return bound;
}
+void do_IRQ(struct cpu_user_regs *regs)
+{
+ struct irqaction *action;
+ uint32_t tsc_in;
+ struct irq_desc *desc;
+ unsigned int vector = (uint8_t)regs->entry_vector;
+ int irq = this_cpu(vector_irq)[vector];
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
+
+ perfc_incr(irqs);
+ this_cpu(irq_count)++;
+ irq_enter();
+
+ if ( irq < 0 )
+ {
+ if ( direct_apic_vector[vector] )
+ direct_apic_vector[vector](regs);
+ else
+ {
+ const char *kind = ", LAPIC";
+
+ if ( apic_isr_read(vector) )
+ ack_APIC_irq();
+ else
+ kind = "";
+ if ( !(vector >= FIRST_LEGACY_VECTOR &&
+ vector <= LAST_LEGACY_VECTOR &&
+ bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR)) )
+ {
+ printk("CPU%u: No irq handler for vector %02x (IRQ %d%s)\n",
+ smp_processor_id(), vector, irq, kind);
+ desc = irq_to_desc(~irq);
+ if ( ~irq < nr_irqs && irq_desc_initialized(desc) )
+ {
+ spin_lock(&desc->lock);
+ printk("IRQ%d a=%04lx[%04lx,%04lx] v=%02x[%02x] t=%s s=%08x\n",
+ ~irq, *cpumask_bits(desc->affinity),
+ *cpumask_bits(desc->arch.cpu_mask),
+ *cpumask_bits(desc->arch.old_cpu_mask),
+ desc->arch.vector, desc->arch.old_vector,
+ desc->handler->typename, desc->status);
+ spin_unlock(&desc->lock);
+ }
+ }
+ TRACE_1D(TRC_HW_IRQ_UNMAPPED_VECTOR, vector);
+ }
+ goto out_no_unlock;
+ }
+
+ desc = irq_to_desc(irq);
+
+ spin_lock(&desc->lock);
+ desc->handler->ack(desc);
+
+ if ( likely(desc->status & IRQ_GUEST) )
+ {
+ if ( irq_ratelimit_timer.function && /* irq rate limiting enabled? */
+ unlikely(desc->rl_cnt++ >= irq_ratelimit_threshold) )
+ {
+ s_time_t now = NOW();
+
+ if ( now < (desc->rl_quantum_start + MILLISECS(10)) )
+ {
+ desc->handler->disable(desc);
+ /*
+ * If handler->disable doesn't actually mask the interrupt, a
+ * disabled irq still can fire. This check also avoids possible
+ * deadlocks if ratelimit_timer_fn runs at the same time.
+ */
+ if ( likely(list_empty(&desc->rl_link)) )
+ {
+ spin_lock(&irq_ratelimit_lock);
+ if ( list_empty(&irq_ratelimit_list) )
+ set_timer(&irq_ratelimit_timer, now + MILLISECS(10));
+ list_add(&desc->rl_link, &irq_ratelimit_list);
+ spin_unlock(&irq_ratelimit_lock);
+ }
+ goto out;
+ }
+ desc->rl_cnt = 0;
+ desc->rl_quantum_start = now;
+ }
+
+ tsc_in = tb_init_done ? get_cycles() : 0;
+ __do_IRQ_guest(irq);
+ TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
+ goto out_no_end;
+ }
+
+ desc->status &= ~IRQ_REPLAY;
+ desc->status |= IRQ_PENDING;
+
+ /*
+ * Since we set PENDING, if another processor is handling a different
+ * instance of this same irq, the other processor will take care of it.
+ */
+ if ( desc->status & (IRQ_DISABLED | IRQ_INPROGRESS) )
+ goto out;
+
+ desc->status |= IRQ_INPROGRESS;
+
+ action = desc->action;
+ while ( desc->status & IRQ_PENDING )
+ {
+ desc->status &= ~IRQ_PENDING;
+ spin_unlock_irq(&desc->lock);
+
+ tsc_in = tb_init_done ? get_cycles() : 0;
+ action->handler(irq, action->dev_id, regs);
+ TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
+
+ spin_lock_irq(&desc->lock);
+ }
+
+ desc->status &= ~IRQ_INPROGRESS;
+
+ out:
+ if ( desc->handler->end )
+ {
+ /*
+ * If higher priority vectors still have their EOIs pending, we may
+ * not issue an EOI here, as this would EOI the highest priority one.
+ */
+ if ( cpu_has_pending_apic_eoi() )
+ {
+ this_cpu(check_eoi_deferral) = true;
+ desc->handler->end(desc, vector);
+ this_cpu(check_eoi_deferral) = false;
+
+ spin_unlock(&desc->lock);
+ flush_ready_eoi();
+ goto out_no_unlock;
+ }
+
+ desc->handler->end(desc, vector);
+ }
+
+ out_no_end:
+ spin_unlock(&desc->lock);
+ out_no_unlock:
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
static inline bool is_free_pirq(const struct domain *d,
const struct pirq *pirq)
{
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 20/12/2019 13:29, Jan Beulich wrote: > This is to avoid forward declarations of static functions. Beyond the > actual code movement this does > - u8 -> uint8_t, > - convert to Xen style, > - drop unnecessary parentheses and alike, > - strip trailing white space. > > Signed-off-by: Jan Beulich <jbeulich@suse.com> Acked-by: Andrew Cooper <andrew.cooper3@citrix.com> _______________________________________________ Xen-devel mailing list Xen-devel@lists.xenproject.org https://lists.xenproject.org/mailman/listinfo/xen-devel
This is for it to be next to do_IRQ(). Beyond the actual code movement
this
- drops the leading underscores,
- passes in desc and vector, rather than irq,
- flips the order of two ASSERT()s,
- changes i and sp to unsigned int,
- restricts the scope of d and sp,
- corrects style.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1144,64 +1144,6 @@ static void irq_guest_eoi_timer_fn(void
spin_unlock_irq(&desc->lock);
}
-static void __do_IRQ_guest(int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
- irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
- struct domain *d;
- int i, sp;
- struct pending_eoi *peoi = this_cpu(pending_eoi);
- unsigned int vector = (u8)get_irq_regs()->entry_vector;
-
- if ( unlikely(action->nr_guests == 0) )
- {
- /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
- ASSERT(action->ack_type == ACKTYPE_EOI);
- ASSERT(desc->status & IRQ_DISABLED);
- if ( desc->handler->end )
- desc->handler->end(desc, vector);
- return;
- }
-
- /*
- * Stop the timer as soon as we're certain we'll set it again further down,
- * to prevent the current timeout (if any) to needlessly expire.
- */
- if ( action->ack_type != ACKTYPE_NONE )
- stop_timer(&action->eoi_timer);
-
- if ( action->ack_type == ACKTYPE_EOI )
- {
- sp = pending_eoi_sp(peoi);
- ASSERT((sp == 0) || (peoi[sp-1].vector < vector));
- ASSERT(sp < (NR_DYNAMIC_VECTORS-1));
- peoi[sp].irq = irq;
- peoi[sp].vector = vector;
- peoi[sp].ready = 0;
- pending_eoi_sp(peoi) = sp+1;
- cpumask_set_cpu(smp_processor_id(), action->cpu_eoi_map);
- }
-
- for ( i = 0; i < action->nr_guests; i++ )
- {
- struct pirq *pirq;
-
- d = action->guest[i];
- pirq = pirq_info(d, domain_irq_to_pirq(d, irq));
- if ( (action->ack_type != ACKTYPE_NONE) &&
- !test_and_set_bool(pirq->masked) )
- action->in_flight++;
- if ( !is_hvm_domain(d) || !hvm_do_IRQ_dpci(d, pirq) )
- send_guest_pirq(d, pirq);
- }
-
- if ( action->ack_type != ACKTYPE_NONE )
- {
- migrate_timer(&action->eoi_timer, smp_processor_id());
- set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
- }
-}
-
/*
* Retrieve Xen irq-descriptor corresponding to a domain-specific irq.
* The descriptor is returned locked. This function is safe against changes
@@ -1870,6 +1812,62 @@ static bool pirq_guest_force_unbind(stru
return bound;
}
+static void do_IRQ_guest(struct irq_desc *desc, unsigned int vector)
+{
+ irq_guest_action_t *action = (irq_guest_action_t *)desc->action;
+ unsigned int i;
+ struct pending_eoi *peoi = this_cpu(pending_eoi);
+
+ if ( unlikely(!action->nr_guests) )
+ {
+ /* An interrupt may slip through while freeing an ACKTYPE_EOI irq. */
+ ASSERT(action->ack_type == ACKTYPE_EOI);
+ ASSERT(desc->status & IRQ_DISABLED);
+ if ( desc->handler->end )
+ desc->handler->end(desc, vector);
+ return;
+ }
+
+ /*
+ * Stop the timer as soon as we're certain we'll set it again further down,
+ * to prevent the current timeout (if any) to needlessly expire.
+ */
+ if ( action->ack_type != ACKTYPE_NONE )
+ stop_timer(&action->eoi_timer);
+
+ if ( action->ack_type == ACKTYPE_EOI )
+ {
+ unsigned int sp = pending_eoi_sp(peoi);
+
+ ASSERT(sp < (NR_DYNAMIC_VECTORS - 1));
+ ASSERT(!sp || (peoi[sp - 1].vector < vector));
+ peoi[sp].irq = desc->irq;
+ peoi[sp].vector = vector;
+ peoi[sp].ready = 0;
+ pending_eoi_sp(peoi) = sp + 1;
+ cpumask_set_cpu(smp_processor_id(), action->cpu_eoi_map);
+ }
+
+ for ( i = 0; i < action->nr_guests; i++ )
+ {
+ struct domain *d = action->guest[i];
+ struct pirq *pirq;
+
+ pirq = pirq_info(d, domain_irq_to_pirq(d, desc->irq));
+ if ( (action->ack_type != ACKTYPE_NONE) &&
+ !test_and_set_bool(pirq->masked) )
+ action->in_flight++;
+ if ( !is_hvm_domain(d) || !hvm_do_IRQ_dpci(d, pirq) )
+ send_guest_pirq(d, pirq);
+ }
+
+ if ( action->ack_type != ACKTYPE_NONE )
+ {
+ migrate_timer(&action->eoi_timer, smp_processor_id());
+ set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
+ }
+}
+
void do_IRQ(struct cpu_user_regs *regs)
{
struct irqaction *action;
@@ -1954,7 +1952,7 @@ void do_IRQ(struct cpu_user_regs *regs)
}
tsc_in = tb_init_done ? get_cycles() : 0;
- __do_IRQ_guest(irq);
+ do_IRQ_guest(desc, vector);
TRACE_3D(TRC_HW_IRQ_HANDLED, irq, tsc_in, get_cycles());
goto out_no_end;
}
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 20/12/2019 13:29, Jan Beulich wrote:
> + for ( i = 0; i < action->nr_guests; i++ )
> + {
> + struct domain *d = action->guest[i];
> + struct pirq *pirq;
> +
> + pirq = pirq_info(d, domain_irq_to_pirq(d, desc->irq));
You could drop one further line by folding this into its declaration.
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
In 5655ce8b1ec2 ("x86/IRQ: make internally used IRQs also honor the
pending EOI stack") it was mentioned that both the check_eoi_deferral
per-CPU variable and the cpu_has_pending_apic_eoi() were added just to
have as little impact on existing behavior as possible, to reduce the
risk of a last minute regression in 4.13.
Upon closer inspection, dropping the variable is an option only if all
callers of ->end() would assume the responsibility of also calling
flush_ready_eoi(). Therefore only drop the cpu_has_pending_apic_eoi()
guard now.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
TBD: In the end I'm not sure this is really worth it then.
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1991,18 +1991,13 @@ void do_IRQ(struct cpu_user_regs *regs)
* If higher priority vectors still have their EOIs pending, we may
* not issue an EOI here, as this would EOI the highest priority one.
*/
- if ( cpu_has_pending_apic_eoi() )
- {
- this_cpu(check_eoi_deferral) = true;
- desc->handler->end(desc, vector);
- this_cpu(check_eoi_deferral) = false;
-
- spin_unlock(&desc->lock);
- flush_ready_eoi();
- goto out_no_unlock;
- }
-
+ this_cpu(check_eoi_deferral) = true;
desc->handler->end(desc, vector);
+ this_cpu(check_eoi_deferral) = false;
+
+ spin_unlock(&desc->lock);
+ flush_ready_eoi();
+ goto out_no_unlock;
}
out_no_end:
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 20/12/2019 13:29, Jan Beulich wrote:
> In 5655ce8b1ec2 ("x86/IRQ: make internally used IRQs also honor the
> pending EOI stack") it was mentioned that both the check_eoi_deferral
> per-CPU variable and the cpu_has_pending_apic_eoi() were added just to
> have as little impact on existing behavior as possible, to reduce the
> risk of a last minute regression in 4.13.
>
> Upon closer inspection, dropping the variable is an option only if all
> callers of ->end() would assume the responsibility of also calling
> flush_ready_eoi(). Therefore only drop the cpu_has_pending_apic_eoi()
> guard now.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> TBD: In the end I'm not sure this is really worth it then.
The resulting logic is simpler, which is justification alone.
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
There's no reason to have the PIC vectors (which are typically entirely
unused on 64-bit systems anyway) right below the high priority ones. Put
them in the lowest possible range, and shift the dynamic vector range up
accordingly.
Note that irq_move_cleanup_interrupt(), despite using
FIRST_DYNAMIC_VECTOR, does not get touched, as PIC interrupts aren't
movable.
Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -525,9 +525,9 @@ void setup_local_APIC(void)
init_apic_ldr();
/*
- * Set Task Priority to reject any interrupts below FIRST_DYNAMIC_VECTOR.
+ * Set Task Priority to reject any interrupts below FIRST_IRQ_VECTOR.
*/
- apic_write(APIC_TASKPRI, (FIRST_DYNAMIC_VECTOR & 0xF0) - 0x10);
+ apic_write(APIC_TASKPRI, (FIRST_IRQ_VECTOR & 0xF0) - 0x10);
/*
* After a crash, we no longer service the interrupts and a pending
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -2388,7 +2388,9 @@ int ioapic_guest_write(unsigned long phy
return 0;
}
- if ( desc->arch.vector <= 0 || desc->arch.vector > LAST_DYNAMIC_VECTOR )
+ if ( desc->arch.vector <= 0 || desc->arch.vector > LAST_DYNAMIC_VECTOR ||
+ (desc->arch.vector >= FIRST_LEGACY_VECTOR &&
+ desc->arch.vector <= LAST_LEGACY_VECTOR) )
{
int vector = desc->arch.vector;
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -101,7 +101,7 @@ void unlock_vector_lock(void)
static inline bool valid_irq_vector(unsigned int vector)
{
- return vector >= FIRST_DYNAMIC_VECTOR && vector <= LAST_HIPRIORITY_VECTOR;
+ return vector >= FIRST_IRQ_VECTOR && vector <= LAST_IRQ_VECTOR;
}
static void release_old_vec(struct irq_desc *desc)
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -923,9 +923,9 @@ autogen_stubs: /* Automatically generate
/* Common interrupts, heading towards do_IRQ(). */
#ifdef CONFIG_PV
- .if vec >= FIRST_DYNAMIC_VECTOR && vec != HYPERCALL_VECTOR && vec != LEGACY_SYSCALL_VECTOR
+ .if vec >= FIRST_IRQ_VECTOR && vec != HYPERCALL_VECTOR && vec != LEGACY_SYSCALL_VECTOR
#else
- .if vec >= FIRST_DYNAMIC_VECTOR
+ .if vec >= FIRST_IRQ_VECTOR
#endif
ALIGN
--- a/xen/include/asm-x86/mach-default/irq_vectors.h
+++ b/xen/include/asm-x86/mach-default/irq_vectors.h
@@ -18,20 +18,23 @@
/* IRQ0 (timer) is statically allocated but must be high priority. */
#define IRQ0_VECTOR 0xf0
-/* Legacy PIC uses vectors 0xe0-0xef. */
-#define FIRST_LEGACY_VECTOR 0xe0
-#define LAST_LEGACY_VECTOR 0xef
+/* Legacy PIC uses vectors 0x20-0x2f. */
+#define FIRST_LEGACY_VECTOR 0x20
+#define LAST_LEGACY_VECTOR (FIRST_LEGACY_VECTOR + 0xf)
#define HYPERCALL_VECTOR 0x82
#define LEGACY_SYSCALL_VECTOR 0x80
/* Dynamically-allocated vectors available to any driver. */
-#define FIRST_DYNAMIC_VECTOR 0x20
-#define LAST_DYNAMIC_VECTOR 0xdf
+#define FIRST_DYNAMIC_VECTOR (LAST_LEGACY_VECTOR + 1)
+#define LAST_DYNAMIC_VECTOR 0xef
#define NR_DYNAMIC_VECTORS (LAST_DYNAMIC_VECTOR - FIRST_DYNAMIC_VECTOR + 1)
#define IRQ_MOVE_CLEANUP_VECTOR FIRST_DYNAMIC_VECTOR
#define NR_VECTORS 256
+#define FIRST_IRQ_VECTOR FIRST_LEGACY_VECTOR
+#define LAST_IRQ_VECTOR LAST_HIPRIORITY_VECTOR
+
#endif /* _ASM_IRQ_VECTORS_H */
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 20/12/2019 13:29, Jan Beulich wrote: > There's no reason to have the PIC vectors (which are typically entirely > unused on 64-bit systems anyway) right below the high priority ones. Put > them in the lowest possible range, and shift the dynamic vector range up > accordingly. It might be helpful to explain why, which is to reduce the priority of PIC vectors in the LAPIC vs everything else. > > Note that irq_move_cleanup_interrupt(), despite using > FIRST_DYNAMIC_VECTOR, does not get touched, as PIC interrupts aren't > movable. > > Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com> > Signed-off-by: Jan Beulich <jbeulich@suse.com> Acked-by: Andrew Cooper <andrew.cooper3@citrix.com> _______________________________________________ Xen-devel mailing list Xen-devel@lists.xenproject.org https://lists.xenproject.org/mailman/listinfo/xen-devel
The legacy vectors have been actively used on CPU 0 only. CPUs not
sharing vector space with CPU 0 can easily re-use them, slightly
increasing the relatively scarce resource of total vectors available in
the system. As a result the legacy vector range simply becomes a
sub-range of the dynamic one, with an extra check performed in
_assign_irq_vector() (we can't rely on the
"per_cpu(vector_irq, new_cpu)[vector] >= 0" check in the subsequent
loop, as we need to also exclude vectors of disabled legacy IRQs).
Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -2389,8 +2389,7 @@ int ioapic_guest_write(unsigned long phy
}
if ( desc->arch.vector <= 0 || desc->arch.vector > LAST_DYNAMIC_VECTOR ||
- (desc->arch.vector >= FIRST_LEGACY_VECTOR &&
- desc->arch.vector <= LAST_LEGACY_VECTOR) )
+ desc->handler->enable == enable_8259A_irq )
{
int vector = desc->arch.vector;
@@ -2617,7 +2616,7 @@ void __init init_ioapic_mappings(void)
if ( nr_irqs == 0 )
nr_irqs = cpu_has_apic ?
- max(16U + num_present_cpus() * NR_DYNAMIC_VECTORS,
+ max(0U + num_present_cpus() * NR_DYNAMIC_VECTORS,
8 * nr_irqs_gsi) :
nr_irqs_gsi;
else if ( nr_irqs < 16 )
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -361,17 +361,25 @@ void destroy_irq(unsigned int irq)
int irq_to_vector(int irq)
{
int vector = IRQ_VECTOR_UNASSIGNED;
+ const struct irq_desc *desc;
BUG_ON(irq >= nr_irqs || irq < 0);
+ desc = irq_to_desc(irq);
if (IO_APIC_IRQ(irq))
{
- vector = irq_to_desc(irq)->arch.vector;
- if (vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR)
+ vector = desc->arch.vector;
+ /*
+ * Both parts of the condition are needed here during early boot, as
+ * at that time IRQ0 in particular may still have the 8259A chip set,
+ * but has already got its special IRQ0_VECTOR.
+ */
+ if ( desc->handler->enable == enable_8259A_irq &&
+ vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR )
vector = 0;
}
else if (MSI_IRQ(irq))
- vector = irq_to_desc(irq)->arch.vector;
+ vector = desc->arch.vector;
else
vector = LEGACY_VECTOR(irq);
@@ -568,6 +576,10 @@ next:
&& test_bit(vector, irq_used_vectors) )
goto next;
+ if ( cpumask_test_cpu(0, vec_mask) &&
+ vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR )
+ goto next;
+
for_each_cpu(new_cpu, vec_mask)
if (per_cpu(vector_irq, new_cpu)[vector] >= 0)
goto next;
@@ -713,6 +725,10 @@ void irq_move_cleanup_interrupt(struct c
{
unsigned vector, me;
+ /* This interrupt should not nest inside others. */
+ BUILD_BUG_ON(APIC_PRIO_CLASS(IRQ_MOVE_CLEANUP_VECTOR) !=
+ APIC_PRIO_CLASS(FIRST_DYNAMIC_VECTOR));
+
ack_APIC_irq();
me = smp_processor_id();
@@ -730,14 +746,15 @@ void irq_move_cleanup_interrupt(struct c
if ((int)irq < 0)
continue;
- if ( vector >= FIRST_LEGACY_VECTOR && vector <= LAST_LEGACY_VECTOR )
- continue;
-
desc = irq_to_desc(irq);
if (!desc)
continue;
spin_lock(&desc->lock);
+
+ if (desc->handler->enable == enable_8259A_irq)
+ goto unlock;
+
if (!desc->arch.move_cleanup_count)
goto unlock;
@@ -1895,6 +1912,7 @@ void do_IRQ(struct cpu_user_regs *regs)
kind = "";
if ( !(vector >= FIRST_LEGACY_VECTOR &&
vector <= LAST_LEGACY_VECTOR &&
+ !smp_processor_id() &&
bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR)) )
{
printk("CPU%u: No irq handler for vector %02x (IRQ %d%s)\n",
--- a/xen/include/asm-x86/mach-default/irq_vectors.h
+++ b/xen/include/asm-x86/mach-default/irq_vectors.h
@@ -19,22 +19,27 @@
#define IRQ0_VECTOR 0xf0
/* Legacy PIC uses vectors 0x20-0x2f. */
-#define FIRST_LEGACY_VECTOR 0x20
+#define FIRST_LEGACY_VECTOR FIRST_DYNAMIC_VECTOR
#define LAST_LEGACY_VECTOR (FIRST_LEGACY_VECTOR + 0xf)
#define HYPERCALL_VECTOR 0x82
#define LEGACY_SYSCALL_VECTOR 0x80
-/* Dynamically-allocated vectors available to any driver. */
-#define FIRST_DYNAMIC_VECTOR (LAST_LEGACY_VECTOR + 1)
+/*
+ * Dynamically-allocated vectors available to any driver. Note that the
+ * legacy vector range is a sub-range of this one, re-used on CPUs not
+ * sharing vectors with CPU 0.
+ */
+#define FIRST_DYNAMIC_VECTOR 0x20
#define LAST_DYNAMIC_VECTOR 0xef
#define NR_DYNAMIC_VECTORS (LAST_DYNAMIC_VECTOR - FIRST_DYNAMIC_VECTOR + 1)
-#define IRQ_MOVE_CLEANUP_VECTOR FIRST_DYNAMIC_VECTOR
+/* There's no IRQ2 at the PIC. */
+#define IRQ_MOVE_CLEANUP_VECTOR (FIRST_LEGACY_VECTOR + 2)
#define NR_VECTORS 256
-#define FIRST_IRQ_VECTOR FIRST_LEGACY_VECTOR
+#define FIRST_IRQ_VECTOR FIRST_DYNAMIC_VECTOR
#define LAST_IRQ_VECTOR LAST_HIPRIORITY_VECTOR
#endif /* _ASM_IRQ_VECTORS_H */
--- a/xen/include/asm-x86/apicdef.h
+++ b/xen/include/asm-x86/apicdef.h
@@ -119,6 +119,9 @@
/* Only available in x2APIC mode */
#define APIC_SELF_IPI 0x3F0
+/* Applicable to vectors, TPR, and PPR. */
+#define APIC_PRIO_CLASS(v) ((v) & 0xF0)
+
#define APIC_BASE __fix_to_virt(FIX_APIC_BASE)
/* It's only used in x2APIC mode of an x2APIC unit. */
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 20/12/2019 13:30, Jan Beulich wrote:
> The legacy vectors have been actively used on CPU 0 only. CPUs not
> sharing vector space with CPU 0 can easily re-use them, slightly
> increasing the relatively scarce resource of total vectors available in
> the system.
I suppose this technically depends on ExtINT messages never targeting
CPUs other than 0.
Either way - I think it's a fine restriction to rely on.
> As a result the legacy vector range simply becomes a
> sub-range of the dynamic one, with an extra check performed in
> _assign_irq_vector() (we can't rely on the
> "per_cpu(vector_irq, new_cpu)[vector] >= 0" check in the subsequent
> loop, as we need to also exclude vectors of disabled legacy IRQs).
>
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/io_apic.c
> +++ b/xen/arch/x86/io_apic.c
> @@ -2389,8 +2389,7 @@ int ioapic_guest_write(unsigned long phy
> }
>
> if ( desc->arch.vector <= 0 || desc->arch.vector > LAST_DYNAMIC_VECTOR ||
> - (desc->arch.vector >= FIRST_LEGACY_VECTOR &&
> - desc->arch.vector <= LAST_LEGACY_VECTOR) )
> + desc->handler->enable == enable_8259A_irq )
> {
> int vector = desc->arch.vector;
>
> @@ -2617,7 +2616,7 @@ void __init init_ioapic_mappings(void)
>
> if ( nr_irqs == 0 )
> nr_irqs = cpu_has_apic ?
We should strip the !cpu_has_apic paths because they are obsolete in
64bit processors. I guess this can wait for a future cleanup series.
> - max(16U + num_present_cpus() * NR_DYNAMIC_VECTORS,
> + max(0U + num_present_cpus() * NR_DYNAMIC_VECTORS,
num_present_cpus() really can't be negative. Neither can
cpumask_weight(). With a bit of API cleanup, this 0U cast can be dropped.
However, given this is the only concern, Acked-by: Andrew Cooper
<andrew.cooper3@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 20.12.2019 15:34, Andrew Cooper wrote: > On 20/12/2019 13:30, Jan Beulich wrote: >> The legacy vectors have been actively used on CPU 0 only. CPUs not >> sharing vector space with CPU 0 can easily re-use them, slightly >> increasing the relatively scarce resource of total vectors available in >> the system. > > I suppose this technically depends on ExtINT messages never targeting > CPUs other than 0. > > Either way - I think its fine restriction to rely on. And setup_local_APIC() arranges for this. >> @@ -2617,7 +2616,7 @@ void __init init_ioapic_mappings(void) >> >> if ( nr_irqs == 0 ) >> nr_irqs = cpu_has_apic ? > > We should strip the !cpu_has_apic paths because they are obsolete in > 64bit processors. I guess this can wait for a future cleanup series. > >> - max(16U + num_present_cpus() * NR_DYNAMIC_VECTORS, >> + max(0U + num_present_cpus() * NR_DYNAMIC_VECTORS, > > num_present_cpus() really can't be negative. Neither can > cpumask_weight(). With a bit of API cleanup, this 0U cast can be dropped. Right, but that's for another day. > However, given this is the only concerned, Acked-by: Andrew Cooper > <andrew.cooper3@citrix.com> Thanks! Jan _______________________________________________ Xen-devel mailing list Xen-devel@lists.xenproject.org https://lists.xenproject.org/mailman/listinfo/xen-devel
This is an architectural definition, so move it to x86-defns.h and add
an X86_ prefix. This in particular allows removing the inclusion of
irq_vectors.h by virtually every source file, due to irq.h and
hvm/vmx/vmcs.h having needed to include it: Changes to IRQ vector usage
shouldn't really trigger full rebuilds.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -36,6 +36,7 @@
#include <asm/io_apic.h>
#include <mach_apic.h>
#include <io_ports.h>
+#include <irq_vectors.h>
#include <xen/kexec.h>
#include <asm/guest.h>
#include <asm/time.h>
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -34,6 +34,7 @@
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/vmcb.h>
#include <asm/apic.h>
+#include <irq_vectors.h>
#include <public/pmu.h>
#include <xsm/xsm.h>
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -83,7 +83,7 @@ static void vlapic_do_init(struct vlapic
static int vlapic_find_highest_vector(const void *bitmap)
{
const uint32_t *word = bitmap;
- unsigned int word_offset = NR_VECTORS / 32;
+ unsigned int word_offset = X86_NR_VECTORS / 32;
/* Work backwards through the bitmap (first 32-bit word in every four). */
while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) )
@@ -659,7 +659,7 @@ int guest_rdmsr_x2apic(const struct vcpu
REG(LVT0) | REG(LVT1) | REG(LVTERR) | REG(TMICT) |
REG(TMCCT) | REG(TDCR) |
#undef REG
-#define REGBLOCK(x) (((1UL << (NR_VECTORS / 32)) - 1) << (APIC_ ## x >> 4))
+#define REGBLOCK(x) (((1UL << (X86_NR_VECTORS / 32)) - 1) << (APIC_ ## x >> 4))
REGBLOCK(ISR) | REGBLOCK(TMR) | REGBLOCK(IRR)
#undef REGBLOCK
};
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -352,7 +352,7 @@ void vmx_intr_assist(void)
{
word = (const void *)&vlapic->regs->data[APIC_IRR];
printk(XENLOG_ERR "vIRR:");
- for ( i = NR_VECTORS / 32; i-- ; )
+ for ( i = X86_NR_VECTORS / 32; i-- ; )
printk(" %08x", word[i*4]);
printk("\n");
}
@@ -362,7 +362,7 @@ void vmx_intr_assist(void)
{
word = (const void *)&pi_desc->pir;
printk(XENLOG_ERR " PIR:");
- for ( i = NR_VECTORS / 32; i-- ; )
+ for ( i = X86_NR_VECTORS / 32; i-- ; )
printk(" %08x", word[i]);
printk("\n");
}
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1161,7 +1161,7 @@ static int construct_vmcs(struct vcpu *v
unsigned int i;
/* EOI-exit bitmap */
- bitmap_zero(v->arch.hvm.vmx.eoi_exit_bitmap, NR_VECTORS);
+ bitmap_zero(v->arch.hvm.vmx.eoi_exit_bitmap, X86_NR_VECTORS);
for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.vmx.eoi_exit_bitmap); ++i )
__vmwrite(EOI_EXIT_BITMAP(i), 0);
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1955,7 +1955,7 @@ static void vmx_process_isr(int isr, str
* is acceptable because the subsequent interrupts will set up the eoi
* bitmap correctly.
*/
- for ( i = 0x10; i < NR_VECTORS; ++i )
+ for ( i = 0x10; i < X86_NR_VECTORS; ++i )
if ( vlapic_test_vector(i, &vlapic->regs->data[APIC_IRR]) ||
vlapic_test_vector(i, &vlapic->regs->data[APIC_ISR]) )
set_bit(i, v->arch.hvm.vmx.eoi_exit_bitmap);
@@ -2075,7 +2075,7 @@ static void vmx_sync_pir_to_irr(struct v
{
struct vlapic *vlapic = vcpu_vlapic(v);
unsigned int group, i;
- DECLARE_BITMAP(pending_intr, NR_VECTORS);
+ DECLARE_BITMAP(pending_intr, X86_NR_VECTORS);
if ( !pi_test_and_clear_on(&v->arch.hvm.vmx.pi_desc) )
return;
@@ -2083,7 +2083,7 @@ static void vmx_sync_pir_to_irr(struct v
for ( group = 0; group < ARRAY_SIZE(pending_intr); group++ )
pending_intr[group] = pi_get_pir(&v->arch.hvm.vmx.pi_desc, group);
- for_each_set_bit(i, pending_intr, NR_VECTORS)
+ for_each_set_bit(i, pending_intr, X86_NR_VECTORS)
vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
}
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -20,6 +20,7 @@
#include <asm/apic.h>
#include <asm/asm_defns.h>
#include <io_ports.h>
+#include <irq_vectors.h>
/*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -35,6 +35,7 @@
#include <asm/setup.h>
#include <mach_apic.h>
#include <io_ports.h>
+#include <irq_vectors.h>
#include <public/physdev.h>
#include <xen/trace.h>
@@ -75,7 +76,7 @@ static void share_vector_maps(unsigned i
return;
bitmap_or(vector_map[src]->_bits, vector_map[src]->_bits,
- vector_map[dst]->_bits, NR_VECTORS);
+ vector_map[dst]->_bits, X86_NR_VECTORS);
for (pin = 0; pin < nr_ioapic_entries[dst]; ++pin) {
int irq = apic_pin_2_gsi_irq(dst, pin);
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -24,6 +24,7 @@
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/mach-generic/mach_apic.h>
+#include <irq_vectors.h>
#include <public/physdev.h>
static int parse_irq_vector_map_param(const char *s);
@@ -44,7 +45,7 @@ vmask_t global_used_vector_map;
struct irq_desc __read_mostly *irq_desc = NULL;
-static DECLARE_BITMAP(used_vectors, NR_VECTORS);
+static DECLARE_BITMAP(used_vectors, X86_NR_VECTORS);
static DEFINE_SPINLOCK(vector_lock);
@@ -149,7 +150,7 @@ static int __init _bind_irq_vector(struc
cpumask_t online_mask;
int cpu;
- BUG_ON((unsigned)vector >= NR_VECTORS);
+ BUG_ON((unsigned)vector >= X86_NR_VECTORS);
cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
if (cpumask_empty(&online_mask))
@@ -416,7 +417,7 @@ int __init init_irq_data(void)
struct irq_desc *desc;
int irq, vector;
- for ( vector = 0; vector < NR_VECTORS; ++vector )
+ for ( vector = 0; vector < X86_NR_VECTORS; ++vector )
this_cpu(vector_irq)[vector] = INT_MIN;
irq_desc = xzalloc_array(struct irq_desc, nr_irqs);
@@ -662,7 +663,7 @@ void setup_vector_irq(unsigned int cpu)
unsigned int irq, vector;
/* Clear vector_irq */
- for ( vector = 0; vector < NR_VECTORS; ++vector )
+ for ( vector = 0; vector < X86_NR_VECTORS; ++vector )
per_cpu(vector_irq, cpu)[vector] = INT_MIN;
/* Mark the inuse vectors */
for ( irq = 0; irq < nr_irqs; ++irq )
@@ -890,7 +891,7 @@ uint8_t alloc_hipriority_vector(void)
return next++;
}
-static void (*direct_apic_vector[NR_VECTORS])(struct cpu_user_regs *);
+static void (*direct_apic_vector[X86_NR_VECTORS])(struct cpu_user_regs *);
void set_direct_apic_vector(
uint8_t vector, void (*handler)(struct cpu_user_regs *))
{
@@ -2510,7 +2511,7 @@ static void dump_irqs(unsigned char key)
process_pending_softirqs();
printk("Direct vector information:\n");
- for ( i = FIRST_DYNAMIC_VECTOR; i < NR_VECTORS; ++i )
+ for ( i = FIRST_DYNAMIC_VECTOR; i < X86_NR_VECTORS; ++i )
if ( direct_apic_vector[i] )
printk(" %#02x -> %ps()\n", i, direct_apic_vector[i]);
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -27,6 +27,7 @@
#include <asm/p2m.h>
#include <mach_apic.h>
#include <io_ports.h>
+#include <irq_vectors.h>
#include <public/physdev.h>
#include <xen/iommu.h>
#include <xsm/xsm.h>
--- a/xen/arch/x86/pv/callback.c
+++ b/xen/arch/x86/pv/callback.c
@@ -358,7 +358,7 @@ long do_set_trap_table(XEN_GUEST_HANDLE_
/* If no table is presented then clear the entire virtual IDT. */
if ( guest_handle_is_null(traps) )
{
- memset(dst, 0, NR_VECTORS * sizeof(*dst));
+ memset(dst, 0, X86_NR_VECTORS * sizeof(*dst));
return 0;
}
@@ -403,7 +403,7 @@ int compat_set_trap_table(XEN_GUEST_HAND
/* If no table is presented then clear the entire virtual IDT. */
if ( guest_handle_is_null(traps) )
{
- memset(dst, 0, NR_VECTORS * sizeof(*dst));
+ memset(dst, 0, X86_NR_VECTORS * sizeof(*dst));
return 0;
}
--- a/xen/arch/x86/pv/domain.c
+++ b/xen/arch/x86/pv/domain.c
@@ -250,9 +250,9 @@ int pv_vcpu_initialise(struct vcpu *v)
if ( rc )
return rc;
- BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv.trap_ctxt) >
+ BUILD_BUG_ON(X86_NR_VECTORS * sizeof(*v->arch.pv.trap_ctxt) >
PAGE_SIZE);
- v->arch.pv.trap_ctxt = xzalloc_array(struct trap_info, NR_VECTORS);
+ v->arch.pv.trap_ctxt = xzalloc_array(struct trap_info, X86_NR_VECTORS);
if ( !v->arch.pv.trap_ctxt )
{
rc = -ENOMEM;
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -23,6 +23,7 @@
#include <xen/hypercall.h>
#include <xen/nospec.h>
#include <xen/trace.h>
+#include <irq_vectors.h>
#define HYPERCALL(x) \
[ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x, \
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -28,6 +28,7 @@
#include <asm/apic.h>
#include <asm/shared.h>
#include <asm/traps.h>
+#include <irq_vectors.h>
void do_entry_int82(struct cpu_user_regs *regs)
{
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -20,6 +20,7 @@
#include <asm/hardirq.h>
#include <asm/hpet.h>
#include <asm/hvm/support.h>
+#include <irq_vectors.h>
#include <mach_apic.h>
/*
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -44,6 +44,7 @@
#include <asm/spec_ctrl.h>
#include <asm/time.h>
#include <asm/tboot.h>
+#include <irq_vectors.h>
#include <mach_apic.h>
unsigned long __read_mostly trampoline_phys;
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1992,7 +1992,7 @@ void __init init_idt_traps(void)
this_cpu(compat_gdt) = boot_compat_gdt;
}
-extern void (*const autogen_entrypoints[NR_VECTORS])(void);
+extern void (*const autogen_entrypoints[X86_NR_VECTORS])(void);
void __init trap_init(void)
{
unsigned int vector;
@@ -2002,7 +2002,7 @@ void __init trap_init(void)
pv_trap_init();
- for ( vector = 0; vector < NR_VECTORS; ++vector )
+ for ( vector = 0; vector < X86_NR_VECTORS; ++vector )
{
if ( autogen_entrypoints[vector] )
{
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -919,7 +919,7 @@ GLOBAL(autogen_entrypoints)
autogen_stubs: /* Automatically generated stubs. */
vec = 0
- .rept NR_VECTORS
+ .rept X86_NR_VECTORS
/* Common interrupts, heading towards do_IRQ(). */
#ifdef CONFIG_PV
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -19,7 +19,6 @@
#define __ASM_X86_HVM_VMX_VMCS_H__
#include <asm/hvm/io.h>
-#include <irq_vectors.h>
extern void vmcs_dump_vcpu(struct vcpu *v);
extern void setup_vmcs_dump(void);
@@ -84,7 +83,7 @@ struct vmx_msr_bitmap {
};
struct pi_desc {
- DECLARE_BITMAP(pir, NR_VECTORS);
+ DECLARE_BITMAP(pir, X86_NR_VECTORS);
union {
struct {
u16 on : 1, /* bit 256 - Outstanding Notification */
@@ -150,7 +149,7 @@ struct vmx_vcpu {
unsigned int host_msr_count;
unsigned long eoi_exitmap_changed;
- DECLARE_BITMAP(eoi_exit_bitmap, NR_VECTORS);
+ DECLARE_BITMAP(eoi_exit_bitmap, X86_NR_VECTORS);
struct pi_desc pi_desc;
unsigned long host_cr0;
--- a/xen/include/asm-x86/irq.h
+++ b/xen/include/asm-x86/irq.h
@@ -9,7 +9,6 @@
#include <xen/percpu.h>
#include <xen/smp.h>
#include <asm/hvm/irq.h>
-#include <irq_vectors.h>
extern unsigned int nr_irqs_gsi;
extern unsigned int nr_irqs;
@@ -24,7 +23,7 @@ extern unsigned int nr_irqs;
#define LEGACY_VECTOR(irq) ((irq) + FIRST_LEGACY_VECTOR)
typedef struct {
- DECLARE_BITMAP(_bits,NR_VECTORS);
+ DECLARE_BITMAP(_bits, X86_NR_VECTORS);
} vmask_t;
struct irq_desc;
@@ -59,7 +58,7 @@ struct arch_irq_desc {
#define IRQ_VECTOR_UNASSIGNED (-1)
-typedef int vector_irq_t[NR_VECTORS];
+typedef int vector_irq_t[X86_NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
extern bool opt_noirqbalance;
--- a/xen/include/asm-x86/mach-default/irq_vectors.h
+++ b/xen/include/asm-x86/mach-default/irq_vectors.h
@@ -37,8 +37,6 @@
/* There's no IRQ2 at the PIC. */
#define IRQ_MOVE_CLEANUP_VECTOR (FIRST_LEGACY_VECTOR + 2)
-#define NR_VECTORS 256
-
#define FIRST_IRQ_VECTOR FIRST_DYNAMIC_VECTOR
#define LAST_IRQ_VECTOR LAST_HIPRIORITY_VECTOR
--- a/xen/include/asm-x86/x86-defns.h
+++ b/xen/include/asm-x86/x86-defns.h
@@ -116,4 +116,6 @@
#define X86_INVPCID_ALL_INCL_GLOBAL 2
#define X86_INVPCID_ALL_NON_GLOBAL 3
+#define X86_NR_VECTORS 256
+
#endif /* __XEN_X86_DEFNS_H__ */
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
On 20/12/2019 13:31, Jan Beulich wrote:
> This is an architectural definition, so move it to x86-defns.h and add
> an X86_ prefix. This in particular allows removing the inclusion of
> irq_vectors.h by virtually every source file, due to irq.h and
> hvm/vmx/vmcs.h having needed to include it: Changes to IRQ vector usage
> shouldn't really trigger full rebuilds.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
© 2016 - 2026 Red Hat, Inc.