On 04/10/2024 16:27, Steven Price wrote:
> The RMM provides emulation of a VGIC to the realm guest but delegates
> much of the handling to the host. Implement support in KVM for
> saving/restoring state to/from the REC structure.
>
> Signed-off-by: Steven Price <steven.price@arm.com>
> ---
> v5: More changes to adapt to rebasing.
> v3: Changes to adapt to rebasing only.
> ---
> arch/arm64/kvm/arm.c | 15 ++++++++++---
> arch/arm64/kvm/vgic/vgic-v3.c | 8 ++++++-
> arch/arm64/kvm/vgic/vgic.c | 41 +++++++++++++++++++++++++++++++++--
> 3 files changed, 58 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 87aa3f07fae2..ecce40a35cd0 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -687,19 +687,24 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>
> void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
> {
> + kvm_timer_vcpu_put(vcpu);
> + kvm_vgic_put(vcpu);
> +
> + vcpu->cpu = -1;
> +
> + if (vcpu_is_rec(vcpu))
> + return;
> +
> kvm_arch_vcpu_put_debug_state_flags(vcpu);
> kvm_arch_vcpu_put_fp(vcpu);
> if (has_vhe())
> kvm_vcpu_put_vhe(vcpu);
> - kvm_timer_vcpu_put(vcpu);
> - kvm_vgic_put(vcpu);
> kvm_vcpu_pmu_restore_host(vcpu);
> if (vcpu_has_nv(vcpu))
> kvm_vcpu_put_hw_mmu(vcpu);
> kvm_arm_vmid_clear_active();
>
> vcpu_clear_on_unsupported_cpu(vcpu);
> - vcpu->cpu = -1;
> }
>
> static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
> @@ -907,6 +912,10 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
> }
>
> if (!irqchip_in_kernel(kvm)) {
> + /* Userspace irqchip not yet supported with Realms */
> + if (kvm_is_realm(vcpu->kvm))
> + return -EOPNOTSUPP;
> +
> /*
> * Tell the rest of the code that there are userspace irqchip
> * VMs in the wild.
> diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
> index b217b256853c..ce782f8524cf 100644
> --- a/arch/arm64/kvm/vgic/vgic-v3.c
> +++ b/arch/arm64/kvm/vgic/vgic-v3.c
> @@ -7,9 +7,11 @@
> #include <linux/kvm.h>
> #include <linux/kvm_host.h>
> #include <kvm/arm_vgic.h>
> +#include <asm/kvm_emulate.h>
> #include <asm/kvm_hyp.h>
> #include <asm/kvm_mmu.h>
> #include <asm/kvm_asm.h>
> +#include <asm/rmi_smc.h>
>
> #include "vgic.h"
>
> @@ -679,7 +681,8 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
> (unsigned long long)info->vcpu.start);
> } else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
> kvm_vgic_global_state.vcpu_base = info->vcpu.start;
> - kvm_vgic_global_state.can_emulate_gicv2 = true;
> + if (!static_branch_unlikely(&kvm_rme_is_available))
> + kvm_vgic_global_state.can_emulate_gicv2 = true;
Could we avoid this restriction for normal VMs by instead adding a
check in kvm_vgic_create()?
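e.g., keep can_emulate_gicv2 set and refuse the v2 model only for a
Realm when the device is created, roughly along these lines (untested
sketch, reusing the kvm_is_realm() helper this series adds):

	/* in kvm_vgic_create(), next to the existing can_emulate_gicv2 check */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
	    (!kvm_vgic_global_state.can_emulate_gicv2 || kvm_is_realm(kvm)))
		return -ENODEV;

That way normal VMs would keep GICv2 emulation even when RME is
available.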
> ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
> if (ret) {
> kvm_err("Cannot register GICv2 KVM device.\n");
> @@ -746,6 +749,9 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
> {
> struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
>
> + if (vcpu_is_rec(vcpu))
> + cpu_if->vgic_vmcr = vcpu->arch.rec.run->exit.gicv3_vmcr;
> +
> kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
> WARN_ON(vgic_v4_put(vcpu));
>
> diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
> index f50274fd5581..78bf9840a557 100644
> --- a/arch/arm64/kvm/vgic/vgic.c
> +++ b/arch/arm64/kvm/vgic/vgic.c
> @@ -10,7 +10,9 @@
> #include <linux/list_sort.h>
> #include <linux/nospec.h>
>
> +#include <asm/kvm_emulate.h>
> #include <asm/kvm_hyp.h>
> +#include <asm/rmi_smc.h>
>
> #include "vgic.h"
>
> @@ -848,10 +850,23 @@ static inline bool can_access_vgic_from_kernel(void)
> return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
> }
>
> +static inline void vgic_rmm_save_state(struct kvm_vcpu *vcpu)
> +{
> + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
> + int i;
> +
> + for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
I believe we should limit the number of LRs that KVM processes for a
given REC VCPU to the limit imposed by the RMM
(RMI_FEATURE_REGISTER_0_GICV3_NUM_LRS). Otherwise we could, in theory,
lose interrupts for a Realm VM: KVM populates all
kvm_vgic_global_state.nr_lr LRs in rec_run on entry, but on REC exit
the RMM only populates the number of LRs it advertises, so KVM could
lose the remaining LRs and those interrupts would never be injected
into the Realm.
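i.e., something along the lines of the sketch below (untested;
kvm_rmm_gicv3_nr_lrs() is a made-up name for an accessor returning the
GICV3_NUM_LRS field cached from RMI feature register 0 at probe time):

	static inline int vgic_nr_lr(struct kvm_vcpu *vcpu)
	{
		int nr_lr = kvm_vgic_global_state.nr_lr;

		/*
		 * The RMM only transfers as many LRs as it advertises in
		 * RMI_FEATURE_REGISTER_0_GICV3_NUM_LRS, so cap KVM's view
		 * of the LR count for a REC.
		 */
		if (vcpu_is_rec(vcpu))
			nr_lr = min(nr_lr, kvm_rmm_gicv3_nr_lrs());

		return nr_lr;
	}

with the loops in vgic_rmm_save_state()/vgic_rmm_restore_state() (and
any LR accounting on the flush path) using this bound rather than
kvm_vgic_global_state.nr_lr directly.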
The rest looks good to me.
Suzuki
> + cpu_if->vgic_lr[i] = vcpu->arch.rec.run->exit.gicv3_lrs[i];
> + vcpu->arch.rec.run->enter.gicv3_lrs[i] = 0;
> + }
> +}
> +
> static inline void vgic_save_state(struct kvm_vcpu *vcpu)
> {
> if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
> vgic_v2_save_state(vcpu);
> + else if (vcpu_is_rec(vcpu))
> + vgic_rmm_save_state(vcpu);
> else
> __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
> }
> @@ -878,10 +893,28 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
> vgic_prune_ap_list(vcpu);
> }
>
> +static inline void vgic_rmm_restore_state(struct kvm_vcpu *vcpu)
> +{
> + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
> + int i;
> +
> + for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
> + vcpu->arch.rec.run->enter.gicv3_lrs[i] = cpu_if->vgic_lr[i];
> + /*
> + * Also populate the rec.run->exit copies so that a late
> + * decision to back out from entering the realm doesn't cause
> + * the state to be lost
> + */
> + vcpu->arch.rec.run->exit.gicv3_lrs[i] = cpu_if->vgic_lr[i];
> + }
> +}
> +
> static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
> {
> if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
> vgic_v2_restore_state(vcpu);
> + else if (vcpu_is_rec(vcpu))
> + vgic_rmm_restore_state(vcpu);
> else
> __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
> }
> @@ -922,7 +955,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
>
> void kvm_vgic_load(struct kvm_vcpu *vcpu)
> {
> - if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
> + if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
> + !vgic_initialized(vcpu->kvm)) ||
> + vcpu_is_rec(vcpu)) {
> if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
> __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
> return;
> @@ -936,7 +971,9 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu)
>
> void kvm_vgic_put(struct kvm_vcpu *vcpu)
> {
> - if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
> + if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
> + !vgic_initialized(vcpu->kvm)) ||
> + vcpu_is_rec(vcpu)) {
> if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
> __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
> return;