Iterate over all target logical IDs in the AVIC kick fastpath instead of
bailing if there is more than one target and KVM's optimized APIC map is
enabled for logical mode. If the optimized map is enabled, all vCPUs are
guaranteed to be mapped 1:1 to a logical ID or effectively have logical
mode disabled, i.e. iterating over the bitmap is guaranteed to kick each
target exactly once.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/svm/avic.c | 126 +++++++++++++++++++++++++---------------
1 file changed, 79 insertions(+), 47 deletions(-)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 2095ece70712..dad5affe44c1 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -339,6 +339,62 @@ static void avic_kick_vcpu(struct kvm_vcpu *vcpu, u32 icrl)
icrl & APIC_VECTOR_MASK);
}
+static void avic_kick_vcpu_by_physical_id(struct kvm *kvm, u32 physical_id,
+ u32 icrl)
+{
+ /*
+ * KVM inhibits AVIC if any vCPU ID diverges from the vCPUs APIC ID,
+ * i.e. APIC ID == vCPU ID.
+ */
+ struct kvm_vcpu *target_vcpu = kvm_get_vcpu_by_id(kvm, physical_id);
+
+ /* Once again, nothing to do if the target vCPU doesn't exist. */
+ if (unlikely(!target_vcpu))
+ return;
+
+ avic_kick_vcpu(target_vcpu, icrl);
+}
+
+static void avic_kick_vcpu_by_logical_id(struct kvm *kvm, u32 *avic_logical_id_table,
+ u32 logid_index, u32 icrl)
+{
+ u32 physical_id;
+
+ if (!avic_logical_id_table) {
+ u32 logid_entry = avic_logical_id_table[logid_index];
+
+ /* Nothing to do if the logical destination is invalid. */
+ if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
+ return;
+
+ physical_id = logid_entry &
+ AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
+ } else {
+ /*
+ * For x2APIC, the logical APIC ID is a read-only value that is
+ * derived from the x2APIC ID, thus the x2APIC ID can be found
+ * by reversing the calculation (stored in logid_index). Note,
+ * bits 31:20 of the x2APIC ID aren't propagated to the logical
+ * ID, but KVM limits the x2APIC ID limited to KVM_MAX_VCPU_IDS.
+ */
+ physical_id = logid_index;
+ }
+
+ avic_kick_vcpu_by_physical_id(kvm, physical_id, icrl);
+}
+
+static bool is_optimized_logical_map_enabled(struct kvm *kvm)
+{
+ struct kvm_apic_map *map;
+ bool enabled;
+
+ rcu_read_lock();
+ map = rcu_dereference(kvm->arch.apic_map);
+ enabled = map && map->logical_mode != KVM_APIC_MODE_MAP_DISABLED;
+ rcu_read_unlock();
+ return enabled;
+}
+
/*
* A fast-path version of avic_kick_target_vcpus(), which attempts to match
* destination APIC ID to vCPU without looping through all vCPUs.
@@ -346,11 +402,10 @@ static void avic_kick_vcpu(struct kvm_vcpu *vcpu, u32 icrl)
static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
u32 icrl, u32 icrh, u32 index)
{
- u32 l1_physical_id, dest;
- struct kvm_vcpu *target_vcpu;
int dest_mode = icrl & APIC_DEST_MASK;
int shorthand = icrl & APIC_SHORT_MASK;
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
+ u32 dest;
if (shorthand != APIC_DEST_NOSHORT)
return -EINVAL;
@@ -367,14 +422,14 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
return -EINVAL;
- l1_physical_id = dest;
-
- if (WARN_ON_ONCE(l1_physical_id != index))
+ if (WARN_ON_ONCE(dest != index))
return -EINVAL;
+ avic_kick_vcpu_by_physical_id(kvm, dest, icrl);
} else {
- u32 bitmap, cluster;
- int logid_index;
+ u32 *avic_logical_id_table;
+ unsigned long bitmap, i;
+ u32 cluster;
if (apic_x2apic_mode(source)) {
/* 16 bit dest mask, 16 bit cluster id */
@@ -394,50 +449,27 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
if (unlikely(!bitmap))
return 0;
- if (!is_power_of_2(bitmap))
- /* multiple logical destinations, use slow path */
+ /*
+ * Use the slow path if more than one bit is set in the bitmap
+ * and KVM's optimized logical map is disabled to avoid kicking
+ * a vCPU multiple times. If the optimized map is disabled, a
+ * vCPU _may_ have multiple bits set in its logical ID, i.e.
+ * may have multiple entries in the logical table.
+ */
+ if (!is_power_of_2(bitmap) &&
+ !is_optimized_logical_map_enabled(kvm))
return -EINVAL;
- logid_index = cluster + __ffs(bitmap);
-
- if (!apic_x2apic_mode(source)) {
- u32 *avic_logical_id_table =
- page_address(kvm_svm->avic_logical_id_table_page);
-
- u32 logid_entry = avic_logical_id_table[logid_index];
-
- if (WARN_ON_ONCE(index != logid_index))
- return -EINVAL;
-
- /* Nothing to do if the logical destination is invalid. */
- if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
- return 0;
-
- l1_physical_id = logid_entry &
- AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
- } else {
- /*
- * For x2APIC, the logical APIC ID is a read-only value
- * that is derived from the x2APIC ID, thus the x2APIC
- * ID can be found by reversing the calculation (done
- * above). Note, bits 31:20 of the x2APIC ID are not
- * propagated to the logical ID, but KVM limits the
- * x2APIC ID limited to KVM_MAX_VCPU_IDS.
- */
- l1_physical_id = logid_index;
- }
+ if (apic_x2apic_mode(source))
+ avic_logical_id_table = NULL;
+ else
+ avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
+
+ for_each_set_bit(i, &bitmap, 16)
+ avic_kick_vcpu_by_logical_id(kvm, avic_logical_id_table,
+ cluster + i, icrl);
}
- /*
- * KVM inhibits AVIC if any vCPU ID diverges from the vCPUs APIC ID,
- * i.e. APIC ID == vCPU ID. Once again, nothing to do if the target
- * vCPU doesn't exist.
- */
- target_vcpu = kvm_get_vcpu_by_id(kvm, l1_physical_id);
- if (unlikely(!target_vcpu))
- return 0;
-
- avic_kick_vcpu(target_vcpu, icrl);
return 0;
}
--
2.37.2.672.g94769d06f0-goog
On Wed, 2022-08-31 at 00:35 +0000, Sean Christopherson wrote:
> Iterate over all target logical IDs in the AVIC kick fastpath instead of
> bailing if there is more than one target and KVM's optimized APIC map is
> enabled for logical mode. If the optimized map is enabled, all vCPUs are
> guaranteed to be mapped 1:1 to a logical ID or effectively have logical
> mode disabled, i.e. iterating over the bitmap is guaranteed to kick each
> target exactly once.
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> arch/x86/kvm/svm/avic.c | 126 +++++++++++++++++++++++++---------------
> 1 file changed, 79 insertions(+), 47 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
> index 2095ece70712..dad5affe44c1 100644
> --- a/arch/x86/kvm/svm/avic.c
> +++ b/arch/x86/kvm/svm/avic.c
> @@ -339,6 +339,62 @@ static void avic_kick_vcpu(struct kvm_vcpu *vcpu, u32 icrl)
> icrl & APIC_VECTOR_MASK);
> }
>
> +static void avic_kick_vcpu_by_physical_id(struct kvm *kvm, u32 physical_id,
> + u32 icrl)
> +{
> + /*
> + * KVM inhibits AVIC if any vCPU ID diverges from the vCPUs APIC ID,
> + * i.e. APIC ID == vCPU ID.
> + */
> + struct kvm_vcpu *target_vcpu = kvm_get_vcpu_by_id(kvm, physical_id);
> +
> + /* Once again, nothing to do if the target vCPU doesn't exist. */
> + if (unlikely(!target_vcpu))
> + return;
> +
> + avic_kick_vcpu(target_vcpu, icrl);
> +}
> +
> +static void avic_kick_vcpu_by_logical_id(struct kvm *kvm, u32 *avic_logical_id_table,
> + u32 logid_index, u32 icrl)
> +{
> + u32 physical_id;
> +
> + if (!avic_logical_id_table) {
^ Typo, the '!' shouldn't be there.
> + u32 logid_entry = avic_logical_id_table[logid_index];
> +
> + /* Nothing to do if the logical destination is invalid. */
> + if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
> + return;
> +
> + physical_id = logid_entry &
> + AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
> + } else {
> + /*
> + * For x2APIC, the logical APIC ID is a read-only value that is
> + * derived from the x2APIC ID, thus the x2APIC ID can be found
> + * by reversing the calculation (stored in logid_index). Note,
> + * bits 31:20 of the x2APIC ID aren't propagated to the logical
> + * ID, but KVM limits the x2APIC ID limited to KVM_MAX_VCPU_IDS.
> + */
> + physical_id = logid_index;
> + }
> +
> + avic_kick_vcpu_by_physical_id(kvm, physical_id, icrl);
> +}
These two functions are a very good cleanup IMHO.
> +
> +static bool is_optimized_logical_map_enabled(struct kvm *kvm)
> +{
> + struct kvm_apic_map *map;
> + bool enabled;
> +
> + rcu_read_lock();
> + map = rcu_dereference(kvm->arch.apic_map);
> + enabled = map && map->logical_mode != KVM_APIC_MODE_MAP_DISABLED;
> + rcu_read_unlock();
> + return enabled;
> +}
This function doesn't belong to avic, it should be in common KVM code.
> +
> /*
> * A fast-path version of avic_kick_target_vcpus(), which attempts to match
> * destination APIC ID to vCPU without looping through all vCPUs.
> @@ -346,11 +402,10 @@ static void avic_kick_vcpu(struct kvm_vcpu *vcpu, u32 icrl)
> static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source,
> u32 icrl, u32 icrh, u32 index)
> {
> - u32 l1_physical_id, dest;
> - struct kvm_vcpu *target_vcpu;
> int dest_mode = icrl & APIC_DEST_MASK;
> int shorthand = icrl & APIC_SHORT_MASK;
> struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
> + u32 dest;
>
> if (shorthand != APIC_DEST_NOSHORT)
> return -EINVAL;
> @@ -367,14 +422,14 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
> if (!apic_x2apic_mode(source) && dest == APIC_BROADCAST)
> return -EINVAL;
>
> - l1_physical_id = dest;
> -
> - if (WARN_ON_ONCE(l1_physical_id != index))
> + if (WARN_ON_ONCE(dest != index))
> return -EINVAL;
>
> + avic_kick_vcpu_by_physical_id(kvm, dest, icrl);
> } else {
> - u32 bitmap, cluster;
> - int logid_index;
> + u32 *avic_logical_id_table;
> + unsigned long bitmap, i;
> + u32 cluster;
>
> if (apic_x2apic_mode(source)) {
> /* 16 bit dest mask, 16 bit cluster id */
> @@ -394,50 +449,27 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
> if (unlikely(!bitmap))
> return 0;
>
> - if (!is_power_of_2(bitmap))
> - /* multiple logical destinations, use slow path */
> + /*
> + * Use the slow path if more than one bit is set in the bitmap
> + * and KVM's optimized logical map is disabled to avoid kicking
> + * a vCPU multiple times. If the optimized map is disabled, a
> + * vCPU _may_ have multiple bits set in its logical ID, i.e.
> + * may have multiple entries in the logical table.
> + */
> + if (!is_power_of_2(bitmap) &&
> + !is_optimized_logical_map_enabled(kvm))
> return -EINVAL;
I hate to say it but there is another issue here, which I have known about for a while
but haven't yet gotten to fix.
The issue is that AVIC's logical to physical map can't cover all the corner cases
that you discovered - it only supports the sane subset: for each cluster, and for each bit
in the mask, it has a physical apic id - so things like logical ids with multiple bits,
having the same logical id for multiple vcpus and so on can't work.
In this case we need to either inhibit AVIC (I support this 100%), or clear
its logical ID map, so all logical IPIs VM exit, and then they can be emulated.
I haven't studied it formally but the code which rebuilds the AVIC's logical ID map
starts at 'avic_handle_ldr_update'.
Besides that this patch makes sense, and it explains why you removed the logic which
was incorrectly checking for having a single bit in the bitmap, but I still
prefer to revert the patch as I explained there.
Best regards,
Maxim Levitsky
>
> - logid_index = cluster + __ffs(bitmap);
> -
> - if (!apic_x2apic_mode(source)) {
> - u32 *avic_logical_id_table =
> - page_address(kvm_svm->avic_logical_id_table_page);
> -
> - u32 logid_entry = avic_logical_id_table[logid_index];
> -
> - if (WARN_ON_ONCE(index != logid_index))
> - return -EINVAL;
> -
> - /* Nothing to do if the logical destination is invalid. */
> - if (unlikely(!(logid_entry & AVIC_LOGICAL_ID_ENTRY_VALID_MASK)))
> - return 0;
> -
> - l1_physical_id = logid_entry &
> - AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
> - } else {
> - /*
> - * For x2APIC, the logical APIC ID is a read-only value
> - * that is derived from the x2APIC ID, thus the x2APIC
> - * ID can be found by reversing the calculation (done
> - * above). Note, bits 31:20 of the x2APIC ID are not
> - * propagated to the logical ID, but KVM limits the
> - * x2APIC ID limited to KVM_MAX_VCPU_IDS.
> - */
> - l1_physical_id = logid_index;
> - }
> + if (apic_x2apic_mode(source))
> + avic_logical_id_table = NULL;
> + else
> + avic_logical_id_table = page_address(kvm_svm->avic_logical_id_table_page);
> +
> + for_each_set_bit(i, &bitmap, 16)
> + avic_kick_vcpu_by_logical_id(kvm, avic_logical_id_table,
> + cluster + i, icrl);
> }
>
> - /*
> - * KVM inhibits AVIC if any vCPU ID diverges from the vCPUs APIC ID,
> - * i.e. APIC ID == vCPU ID. Once again, nothing to do if the target
> - * vCPU doesn't exist.
> - */
> - target_vcpu = kvm_get_vcpu_by_id(kvm, l1_physical_id);
> - if (unlikely(!target_vcpu))
> - return 0;
> -
> - avic_kick_vcpu(target_vcpu, icrl);
> return 0;
> }
>
On Wed, Aug 31, 2022, Maxim Levitsky wrote:
> On Wed, 2022-08-31 at 00:35 +0000, Sean Christopherson wrote:
> > +static void avic_kick_vcpu_by_logical_id(struct kvm *kvm, u32 *avic_logical_id_table,
> > + u32 logid_index, u32 icrl)
> > +{
> > + u32 physical_id;
> > +
> > + if (!avic_logical_id_table) {
> ^ Typo, the '!' shoudn't be there.
Ouch. I suspect the tests pass because this just ends up routing events through
the slow path. I'll try to concoct a testcase to expose this bug.
> > +static bool is_optimized_logical_map_enabled(struct kvm *kvm)
> > +{
> > + struct kvm_apic_map *map;
> > + bool enabled;
> > +
> > + rcu_read_lock();
> > + map = rcu_dereference(kvm->arch.apic_map);
> > + enabled = map && map->logical_mode != KVM_APIC_MODE_MAP_DISABLED;
> > + rcu_read_unlock();
> > + return enabled;
> > +}
>
> This function doesn't belong to avic, it should be in common KVM code.
I'll move it. I'm not expecting any additional users, but I agree it belongs
elsewhere. Actually, might be a moot point (see below).
> > @@ -394,50 +449,27 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
> > if (unlikely(!bitmap))
> > return 0;
> >
> > - if (!is_power_of_2(bitmap))
> > - /* multiple logical destinations, use slow path */
> > + /*
> > + * Use the slow path if more than one bit is set in the bitmap
> > + * and KVM's optimized logical map is disabled to avoid kicking
> > + * a vCPU multiple times. If the optimized map is disabled, a
> > + * vCPU _may_ have multiple bits set in its logical ID, i.e.
> > + * may have multiple entries in the logical table.
> > + */
> > + if (!is_power_of_2(bitmap) &&
> > + !is_optimized_logical_map_enabled(kvm))
> > return -EINVAL;
>
>
> I hate to say it but there is another issue here, which I know about for a while
> but haven't gotten yet to fix.
>
> The issue is that AVIC's logical to physical map can't cover all the corner cases
> that you discovered - it only supports the sane subset: for each cluster, and for each bit
> in the mask, it has a physical apic id - so things like logical ids with multiple bits,
> having same logical id for multiple vcpus and so on can't work.
>
> In this case we need to either inhibit AVIC (I support this 100%),
I like the idea of inhibiting.
> or clear its logical ID map, so all logicical IPIs VM exit, and then they
> can be emulated.
>
> I haven't studied it formally but the code which rebuilds the AVIC's logical ID map
> starts at 'avic_handle_ldr_update'.
I suspected there are issues here, but the new tests passed (somewhat surprisingly)
so I stopped trying to decipher the AVIC LDR handling.
Eww. And the VM-Exit trap logic is broken too. If the guest updates and disables
its LDR, SVM returns immediately and doesn't call into common APIC code, i.e. doesn't
recalc the optimized map. E.g. if the guest clears its LDR, the optimized map will
be left as is and the vCPU will receive interrupts using its old LDR.
case APIC_LDR:
if (avic_handle_ldr_update(vcpu))
return 0;
break;
Rather than handling this purely in AVIC code, what if we key off of
the optimized map being enabled? E.g. drop the return from avic_handle_ldr_update()
and in the kvm_recalculate_apic_map() do:
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3b6ef36b3963..6e188010b614 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -364,6 +364,11 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
cluster[ldr] = apic;
}
out:
+ if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
+ kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_MAP_DISABLED);
+ else
+ kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_MAP_DISABLED);
+
old = rcu_dereference_protected(kvm->arch.apic_map,
lockdep_is_held(&kvm->arch.apic_map_lock));
rcu_assign_pointer(kvm->arch.apic_map, new);
On Wed, 2022-08-31 at 18:19 +0000, Sean Christopherson wrote:
> On Wed, Aug 31, 2022, Maxim Levitsky wrote:
> > On Wed, 2022-08-31 at 00:35 +0000, Sean Christopherson wrote:
> > > +static void avic_kick_vcpu_by_logical_id(struct kvm *kvm, u32 *avic_logical_id_table,
> > > + u32 logid_index, u32 icrl)
> > > +{
> > > + u32 physical_id;
> > > +
> > > + if (!avic_logical_id_table) {
> > ^ Typo, the '!' shoudn't be there.
>
> Ouch. I suspect the tests pass because this just ends up routing events through
> the slow path. I try to concoct a testcase to expose this bug.
>
> > > +static bool is_optimized_logical_map_enabled(struct kvm *kvm)
> > > +{
> > > + struct kvm_apic_map *map;
> > > + bool enabled;
> > > +
> > > + rcu_read_lock();
> > > + map = rcu_dereference(kvm->arch.apic_map);
> > > + enabled = map && map->logical_mode != KVM_APIC_MODE_MAP_DISABLED;
> > > + rcu_read_unlock();
> > > + return enabled;
> > > +}
> >
> > This function doesn't belong to avic, it should be in common KVM code.
>
> I'll move it. I'm not expecting any additional users, but I agree it belongs
> elsewhere. Actually, might be a moot point (see below).
>
> > > @@ -394,50 +449,27 @@ static int avic_kick_target_vcpus_fast(struct kvm *kvm, struct kvm_lapic *source
> > > if (unlikely(!bitmap))
> > > return 0;
> > >
> > > - if (!is_power_of_2(bitmap))
> > > - /* multiple logical destinations, use slow path */
> > > + /*
> > > + * Use the slow path if more than one bit is set in the bitmap
> > > + * and KVM's optimized logical map is disabled to avoid kicking
> > > + * a vCPU multiple times. If the optimized map is disabled, a
> > > + * vCPU _may_ have multiple bits set in its logical ID, i.e.
> > > + * may have multiple entries in the logical table.
> > > + */
> > > + if (!is_power_of_2(bitmap) &&
> > > + !is_optimized_logical_map_enabled(kvm))
> > > return -EINVAL;
> >
> > I hate to say it but there is another issue here, which I know about for a while
> > but haven't gotten yet to fix.
> >
> > The issue is that AVIC's logical to physical map can't cover all the corner cases
> > that you discovered - it only supports the sane subset: for each cluster, and for each bit
> > in the mask, it has a physical apic id - so things like logical ids with multiple bits,
> > having same logical id for multiple vcpus and so on can't work.
> >
> > In this case we need to either inhibit AVIC (I support this 100%),
>
> I like the idea of inhibiting.
>
> > or clear its logical ID map, so all logicical IPIs VM exit, and then they
> > can be emulated.
> >
> > I haven't studied it formally but the code which rebuilds the AVIC's logical ID map
> > starts at 'avic_handle_ldr_update'.
>
> I suspected there are issues here, but the new tests passed (somewhat surprisingly)
> so I stopped trying to decipher the AVIC LDR handling.
>
> Eww. And the VM-Exit trap logic is broken too. If the guest updates and disables
> its LDR, SVM returns immediately and doesn't call into common APIC code, i.e. doesn't
> recalc the optimized map. E.g. if the guest clears its LDR, the optimized map will
> be left as is and the vCPU will receive interrupts using its old LDR.
>
> case APIC_LDR:
> if (avic_handle_ldr_update(vcpu))
> return 0;
> break;
>
> Rather than handling this purely in AVIC code, what if we a key off of
> the optimized map being enabled? E.g. drop the return from avic_handle_ldr_update()
> and in the kvm_recalculate_apic_map() do:
>
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 3b6ef36b3963..6e188010b614 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -364,6 +364,11 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
> cluster[ldr] = apic;
> }
> out:
> + if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
> + kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_MAP_DISABLED);
> + else
> + kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_MAP_DISABLED);
> +
This looks very good, it will even work on APICv, because the 'check_apicv_inhibit_reasons'
will not return true for this new reason (APICv's IPI virtualization, I think, doesn't deal with logical destinations at all).
Best regards,
Maxim Levitsky
> old = rcu_dereference_protected(kvm->arch.apic_map,
> lockdep_is_held(&kvm->arch.apic_map_lock));
> rcu_assign_pointer(kvm->arch.apic_map, new);
>
© 2016 - 2026 Red Hat, Inc.