The RMM keeps track of the timers while the realm REC is running, but on
exit to the normal world KVM is responsible for handling them.
A later patch adds support for propagating the timer values from the
exit data structure and calling kvm_realm_timers_update().
Signed-off-by: Steven Price <steven.price@arm.com>
---
Changes since v11:
* Drop the kvm_is_realm() check from timer_set_offset(). We already
ensure that the offset is 0 when calling the function.
Changes since v10:
* KVM_CAP_COUNTER_OFFSET is now already hidden by a previous patch.
Changes since v9:
* No need to move the call to kvm_timer_unblocking() in
kvm_timer_vcpu_load().
Changes since v7:
* Hide KVM_CAP_COUNTER_OFFSET for realm guests.
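Note for reviewers (illustrative only, not part of this patch): the
exit-time handling referred to in the commit message could look roughly
like the sketch below. The helper name, the vcpu->arch.rec path and the
REC exit field names (cntv_ctl/cntv_cval/cntp_ctl/cntp_cval) are
assumptions based on the RMM specification's REC exit record, and the
exact sysreg accessor depends on the kernel baseline.

static void realm_propagate_timer_state(struct kvm_vcpu *vcpu)
{
	struct realm_rec *rec = &vcpu->arch.rec;	/* assumed layout */

	/*
	 * Copy the timer state reported by the RMM on REC exit into the
	 * vcpu's shadow sysregs so that timer_get_ctl()/timer_get_cval()
	 * observe current values in kvm_realm_timers_update().
	 * __vcpu_sys_reg() is used as an lvalue for brevity; newer trees
	 * may want __vcpu_assign_sys_reg() instead.
	 */
	__vcpu_sys_reg(vcpu, CNTV_CTL_EL0)  = rec->run->exit.cntv_ctl;
	__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = rec->run->exit.cntv_cval;
	__vcpu_sys_reg(vcpu, CNTP_CTL_EL0)  = rec->run->exit.cntp_ctl;
	__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = rec->run->exit.cntp_cval;

	kvm_realm_timers_update(vcpu);
}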
---
arch/arm64/kvm/arch_timer.c | 37 ++++++++++++++++++++++++++++++++++--
include/kvm/arm_arch_timer.h | 2 ++
2 files changed, 37 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 99a07972068d..99308bde2a05 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -453,6 +453,21 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
timer_ctx);
}
+void kvm_realm_timers_update(struct kvm_vcpu *vcpu)
+{
+ struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu;
+ int i;
+
+ for (i = 0; i < NR_KVM_EL0_TIMERS; i++) {
+ struct arch_timer_context *timer = &arch_timer->timers[i];
+ bool status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT;
+ bool level = kvm_timer_irq_can_fire(timer) && status;
+
+ if (level != timer->irq.level)
+ kvm_timer_update_irq(vcpu, level, timer);
+ }
+}
+
/* Only called for a fully emulated timer */
static void timer_emulate(struct arch_timer_context *ctx)
{
@@ -1056,7 +1071,9 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
ctxt->timer_id = timerid;
- if (timerid == TIMER_VTIMER)
+ if (kvm_is_realm(vcpu->kvm))
+ ctxt->offset.vm_offset = NULL;
+ else if (timerid == TIMER_VTIMER)
ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset;
else
ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset;
@@ -1078,13 +1095,19 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
+ u64 cntvoff;
for (int i = 0; i < NR_KVM_TIMERS; i++)
timer_context_init(vcpu, i);
+ if (kvm_is_realm(vcpu->kvm))
+ cntvoff = 0;
+ else
+ cntvoff = kvm_phys_timer_read();
+
/* Synchronize offsets across timers of a VM if not already provided */
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
- timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
+ timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
timer_set_offset(vcpu_ptimer(vcpu), 0);
}
@@ -1556,6 +1579,13 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+ /*
+ * We don't use mapped IRQs for Realms because the RMI doesn't allow
+ * us to set the LR.HW bit in the VGIC.
+ */
+ if (vcpu_is_rec(vcpu))
+ return 0;
+
get_timer_map(vcpu, &map);
ret = kvm_vgic_map_phys_irq(vcpu,
@@ -1687,6 +1717,9 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
if (offset->reserved)
return -EINVAL;
+ if (kvm_is_realm(kvm))
+ return -EINVAL;
+
mutex_lock(&kvm->lock);
if (!kvm_trylock_all_vcpus(kvm)) {
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 7310841f4512..bab0daafc6b1 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -111,6 +111,8 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+void kvm_realm_timers_update(struct kvm_vcpu *vcpu);
+
u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
--
2.43.0