Simplify the error paths in snp_launch_update() by using a mutex guard,
allowing early return instead of using gotos.
Signed-off-by: Carlos López <clopez@suse.de>
---
arch/x86/kvm/svm/sev.c | 32 +++++++++++++-------------------
1 file changed, 13 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index f59c65abe3cf..1b325ae61d15 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -8,6 +8,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
@@ -2367,7 +2368,6 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
struct kvm_memory_slot *memslot;
long npages, count;
void __user *src;
- int ret = 0;
if (!sev_snp_guest(kvm) || !sev->snp_context)
return -EINVAL;
@@ -2407,13 +2407,11 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
* initial expected state and better guard against unexpected
* situations.
*/
- mutex_lock(&kvm->slots_lock);
+ guard(mutex)(&kvm->slots_lock);
memslot = gfn_to_memslot(kvm, params.gfn_start);
- if (!kvm_slot_has_gmem(memslot)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!kvm_slot_has_gmem(memslot))
+ return -EINVAL;
sev_populate_args.sev_fd = argp->sev_fd;
sev_populate_args.type = params.type;
@@ -2425,22 +2423,18 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
argp->error = sev_populate_args.fw_error;
pr_debug("%s: kvm_gmem_populate failed, ret %ld (fw_error %d)\n",
__func__, count, argp->error);
- ret = -EIO;
- } else {
- params.gfn_start += count;
- params.len -= count * PAGE_SIZE;
- if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
- params.uaddr += count * PAGE_SIZE;
-
- ret = 0;
- if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
- ret = -EFAULT;
+ return -EIO;
}
-out:
- mutex_unlock(&kvm->slots_lock);
+ params.gfn_start += count;
+ params.len -= count * PAGE_SIZE;
+ if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
+ params.uaddr += count * PAGE_SIZE;
- return ret;
+ if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
+ return -EFAULT;
+
+ return 0;
}
static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
--
2.51.0
> Simplify the error paths in snp_launch_update() by using a mutex guard,
> allowing early return instead of using gotos.
>
> Signed-off-by: Carlos López <clopez@suse.de>
> ---
> arch/x86/kvm/svm/sev.c | 32 +++++++++++++-------------------
> 1 file changed, 13 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index f59c65abe3cf..1b325ae61d15 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -8,6 +8,7 @@
> */
> #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
>
> +#include <linux/cleanup.h>
This does not seem to be required, as the file compiles without this include as well.
Otherwise looks fine:
Reviewed-by: Pankaj Gupta <pankaj.gupta@amd.com>
> #include <linux/kvm_types.h>
> #include <linux/kvm_host.h>
> #include <linux/kernel.h>
> @@ -2367,7 +2368,6 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
> struct kvm_memory_slot *memslot;
> long npages, count;
> void __user *src;
> - int ret = 0;
>
> if (!sev_snp_guest(kvm) || !sev->snp_context)
> return -EINVAL;
> @@ -2407,13 +2407,11 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
> * initial expected state and better guard against unexpected
> * situations.
> */
> - mutex_lock(&kvm->slots_lock);
> + guard(mutex)(&kvm->slots_lock);
>
> memslot = gfn_to_memslot(kvm, params.gfn_start);
> - if (!kvm_slot_has_gmem(memslot)) {
> - ret = -EINVAL;
> - goto out;
> - }
> + if (!kvm_slot_has_gmem(memslot))
> + return -EINVAL;
>
> sev_populate_args.sev_fd = argp->sev_fd;
> sev_populate_args.type = params.type;
> @@ -2425,22 +2423,18 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
> argp->error = sev_populate_args.fw_error;
> pr_debug("%s: kvm_gmem_populate failed, ret %ld (fw_error %d)\n",
> __func__, count, argp->error);
> - ret = -EIO;
> - } else {
> - params.gfn_start += count;
> - params.len -= count * PAGE_SIZE;
> - if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
> - params.uaddr += count * PAGE_SIZE;
> -
> - ret = 0;
> - if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
> - ret = -EFAULT;
> + return -EIO;
> }
>
> -out:
> - mutex_unlock(&kvm->slots_lock);
> + params.gfn_start += count;
> + params.len -= count * PAGE_SIZE;
> + if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
> + params.uaddr += count * PAGE_SIZE;
>
> - return ret;
> + if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
> + return -EFAULT;
> +
> + return 0;
> }
>
> static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
© 2016 - 2026 Red Hat, Inc.