[PATCH 05/10] KVM: arm64: Use guard(hyp_spinlock) in pkvm.c

Fuad Tabba posted 10 patches 3 weeks ago
[PATCH 05/10] KVM: arm64: Use guard(hyp_spinlock) in pkvm.c
Posted by Fuad Tabba 3 weeks ago
Migrate manual hyp_spin_lock() and hyp_spin_unlock() calls managing
the global vm_table_lock to use the guard(hyp_spinlock) macro.

This significantly cleans up validation and error paths during VM
creation and manipulation by eliminating the need for goto labels
and manual unlock calls on early returns.

Change-Id: I894df69b3cfe053a77dd660dfb70c95640c6d70c
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/kvm/hyp/nvhe/pkvm.c | 122 +++++++++++++++++------------------------
 1 file changed, 51 insertions(+), 71 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 2f029bfe4755..8f901fdead89 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -253,28 +253,23 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 	if (__this_cpu_read(loaded_hyp_vcpu))
 		return NULL;
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	hyp_vm = get_vm_by_handle(handle);
 	if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
-		goto unlock;
+		return NULL;
 
 	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
 	if (!hyp_vcpu)
-		goto unlock;
+		return NULL;
 
 	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
-	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
-		hyp_vcpu = NULL;
-		goto unlock;
-	}
+	if (unlikely(hyp_vcpu->loaded_hyp_vcpu))
+		return NULL;
 
 	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
 	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
-unlock:
-	hyp_spin_unlock(&vm_table_lock);
 
-	if (hyp_vcpu)
-		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
+	__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
 	return hyp_vcpu;
 }
 
@@ -282,11 +277,10 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 {
 	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	hyp_vcpu->loaded_hyp_vcpu = NULL;
 	__this_cpu_write(loaded_hyp_vcpu, NULL);
 	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
-	hyp_spin_unlock(&vm_table_lock);
 }
 
 struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
@@ -299,20 +293,18 @@ struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
 {
 	struct pkvm_hyp_vm *hyp_vm;
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	hyp_vm = get_vm_by_handle(handle);
 	if (hyp_vm)
 		hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
-	hyp_spin_unlock(&vm_table_lock);
 
 	return hyp_vm;
 }
 
 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm)
 {
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
-	hyp_spin_unlock(&vm_table_lock);
 }
 
 struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
@@ -613,9 +605,8 @@ static int insert_vm_table_entry(pkvm_handle_t handle,
 {
 	int ret;
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	ret = __insert_vm_table_entry(handle, hyp_vm);
-	hyp_spin_unlock(&vm_table_lock);
 
 	return ret;
 }
@@ -692,9 +683,8 @@ int __pkvm_reserve_vm(void)
 {
 	int ret;
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	ret = allocate_vm_table_entry();
-	hyp_spin_unlock(&vm_table_lock);
 
 	if (ret < 0)
 		return ret;
@@ -713,10 +703,9 @@ void __pkvm_unreserve_vm(pkvm_handle_t handle)
 	if (unlikely(!vm_table))
 		return;
 
-	hyp_spin_lock(&vm_table_lock);
+	guard(hyp_spinlock)(&vm_table_lock);
 	if (likely(idx < KVM_MAX_PVMS && vm_table[idx] == RESERVED_ENTRY))
 		remove_vm_table_entry(handle);
-	hyp_spin_unlock(&vm_table_lock);
 }
 
 /*
@@ -815,35 +804,35 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
 	if (!hyp_vcpu)
 		return -ENOMEM;
 
-	hyp_spin_lock(&vm_table_lock);
+	scoped_guard(hyp_spinlock, &vm_table_lock) {
+		hyp_vm = get_vm_by_handle(handle);
+		if (!hyp_vm) {
+			ret = -ENOENT;
+			goto err_unmap;
+		}
 
-	hyp_vm = get_vm_by_handle(handle);
-	if (!hyp_vm) {
-		ret = -ENOENT;
-		goto unlock;
+		ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
+		if (ret)
+			goto err_unmap;
+
+		idx = hyp_vcpu->vcpu.vcpu_idx;
+		if (idx >= hyp_vm->kvm.created_vcpus) {
+			ret = -EINVAL;
+			goto err_unmap;
+		}
+
+		if (hyp_vm->vcpus[idx]) {
+			ret = -EINVAL;
+			goto err_unmap;
+		}
+
+		hyp_vm->vcpus[idx] = hyp_vcpu;
+
+		return 0;
 	}
 
-	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
-	if (ret)
-		goto unlock;
-
-	idx = hyp_vcpu->vcpu.vcpu_idx;
-	if (idx >= hyp_vm->kvm.created_vcpus) {
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	if (hyp_vm->vcpus[idx]) {
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	hyp_vm->vcpus[idx] = hyp_vcpu;
-unlock:
-	hyp_spin_unlock(&vm_table_lock);
-
-	if (ret)
-		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+err_unmap:
+	unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
 	return ret;
 }
 
@@ -866,27 +855,22 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 	struct kvm *host_kvm;
 	unsigned int idx;
 	size_t vm_size;
-	int err;
 
-	hyp_spin_lock(&vm_table_lock);
-	hyp_vm = get_vm_by_handle(handle);
-	if (!hyp_vm) {
-		err = -ENOENT;
-		goto err_unlock;
+	scoped_guard(hyp_spinlock, &vm_table_lock) {
+		hyp_vm = get_vm_by_handle(handle);
+		if (!hyp_vm)
+			return -ENOENT;
+
+		if (WARN_ON(hyp_page_count(hyp_vm)))
+			return -EBUSY;
+
+		host_kvm = hyp_vm->host_kvm;
+
+		/* Ensure the VMID is clean before it can be reallocated */
+		__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
+		remove_vm_table_entry(handle);
 	}
 
-	if (WARN_ON(hyp_page_count(hyp_vm))) {
-		err = -EBUSY;
-		goto err_unlock;
-	}
-
-	host_kvm = hyp_vm->host_kvm;
-
-	/* Ensure the VMID is clean before it can be reallocated */
-	__kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
-	remove_vm_table_entry(handle);
-	hyp_spin_unlock(&vm_table_lock);
-
 	/* Reclaim guest pages (including page-table pages) */
 	mc = &host_kvm->arch.pkvm.teardown_mc;
 	stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
@@ -917,8 +901,4 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 	teardown_donated_memory(mc, hyp_vm, vm_size);
 	hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
 	return 0;
-
-err_unlock:
-	hyp_spin_unlock(&vm_table_lock);
-	return err;
 }

-- 
2.53.0.851.ga537e3e6e9-goog
Re: [PATCH 05/10] KVM: arm64: Use guard(hyp_spinlock) in pkvm.c
Posted by Jonathan Cameron 2 weeks, 6 days ago
On Mon, 16 Mar 2026 17:35:26 +0000
Fuad Tabba <tabba@google.com> wrote:

> Migrate manual hyp_spin_lock() and hyp_spin_unlock() calls managing
> the global vm_table_lock to use the guard(hyp_spinlock) macro.
> 
> This significantly cleans up validation and error paths during VM
> creation and manipulation by eliminating the need for goto labels
> and manual unlock procedures on early returns.
> 
> Change-Id: I894df69b3cfe053a77dd660dfb70c95640c6d70c
> Signed-off-by: Fuad Tabba <tabba@google.com>
> ---

>  
>  struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle)
> @@ -613,9 +605,8 @@ static int insert_vm_table_entry(pkvm_handle_t handle,
>  {
>  	int ret;
>  
> -	hyp_spin_lock(&vm_table_lock);
> +	guard(hyp_spinlock)(&vm_table_lock);
>  	ret = __insert_vm_table_entry(handle, hyp_vm);
> -	hyp_spin_unlock(&vm_table_lock);
>  
>  	return ret;

	return __insert_vm_table_entry(handle, hyp_vm);

>  }

> @@ -815,35 +804,35 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
>  	if (!hyp_vcpu)
>  		return -ENOMEM;
>  
> -	hyp_spin_lock(&vm_table_lock);
> +	scoped_guard(hyp_spinlock, &vm_table_lock) {
> +		hyp_vm = get_vm_by_handle(handle);
> +		if (!hyp_vm) {
> +			ret = -ENOENT;
> +			goto err_unmap;

As in the earlier patch, I'd prefer not to mix gotos and guard()s.

> +		}
>  
> -	hyp_vm = get_vm_by_handle(handle);
> -	if (!hyp_vm) {
> -		ret = -ENOENT;
> -		goto unlock;
> +		ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
> +		if (ret)
> +			goto err_unmap;
> +
> +		idx = hyp_vcpu->vcpu.vcpu_idx;
> +		if (idx >= hyp_vm->kvm.created_vcpus) {
> +			ret = -EINVAL;
> +			goto err_unmap;
> +		}
> +
> +		if (hyp_vm->vcpus[idx]) {
> +			ret = -EINVAL;
> +			goto err_unmap;
> +		}
> +
> +		hyp_vm->vcpus[idx] = hyp_vcpu;
> +
> +		return 0;
>  	}
>  
> -	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
> -	if (ret)
> -		goto unlock;
> -
> -	idx = hyp_vcpu->vcpu.vcpu_idx;
> -	if (idx >= hyp_vm->kvm.created_vcpus) {
> -		ret = -EINVAL;
> -		goto unlock;
> -	}
> -
> -	if (hyp_vm->vcpus[idx]) {
> -		ret = -EINVAL;
> -		goto unlock;
> -	}
> -
> -	hyp_vm->vcpus[idx] = hyp_vcpu;
> -unlock:
> -	hyp_spin_unlock(&vm_table_lock);
> -
> -	if (ret)
> -		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
> +err_unmap:
> +	unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
>  	return ret;
>  }
>  
>