There is not much value in checking whether a memslot is writable explicitly
before a write, as it may change underneath after the check. Rather, return
an invalid address error when write_guest fails, since it checks whether the
slot is writable anyway.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
arch/riscv/kvm/vcpu_pmu.c | 11 ++---------
arch/riscv/kvm/vcpu_sbi_sta.c | 9 ++-------
2 files changed, 4 insertions(+), 16 deletions(-)
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 15d71a7b75ba..f8514086bd6b 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -409,8 +409,6 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
int sbiret = 0;
gpa_t saddr;
- unsigned long hva;
- bool writable;
if (!kvpmu || flags) {
sbiret = SBI_ERR_INVALID_PARAM;
@@ -432,19 +430,14 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
goto out;
}
- hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable);
- if (kvm_is_error_hva(hva) || !writable) {
- sbiret = SBI_ERR_INVALID_ADDRESS;
- goto out;
- }
-
kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);
if (!kvpmu->sdata)
return -ENOMEM;
+ /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */
if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) {
kfree(kvpmu->sdata);
- sbiret = SBI_ERR_FAILURE;
+ sbiret = SBI_ERR_INVALID_ADDRESS;
goto out;
}
diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c
index cc6cb7c8f0e4..caaa28460ca4 100644
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c
@@ -85,8 +85,6 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
unsigned long shmem_phys_hi = cp->a1;
u32 flags = cp->a2;
struct sbi_sta_struct zero_sta = {0};
- unsigned long hva;
- bool writable;
gpa_t shmem;
int ret;
@@ -111,13 +109,10 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
return SBI_ERR_INVALID_ADDRESS;
}
- hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
- if (kvm_is_error_hva(hva) || !writable)
- return SBI_ERR_INVALID_ADDRESS;
-
+ /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */
ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
if (ret)
- return SBI_ERR_FAILURE;
+ return SBI_ERR_INVALID_ADDRESS;
vcpu->arch.sta.shmem = shmem;
vcpu->arch.sta.last_steal = current->sched_info.run_delay;
--
2.43.0
On Tue, Sep 9, 2025 at 12:33 PM Atish Patra <atishp@rivosinc.com> wrote: > > There is not much value in checking if a memslot is writable explicitly > before a write as it may change underneath after the check. Rather, return > invalid address error when write_guest fails as it checks if the slot > is writable anyways. > > Suggested-by: Sean Christopherson <seanjc@google.com> > Signed-off-by: Atish Patra <atishp@rivosinc.com> LGTM. Reviewed-by: Anup Patel <anup@brainfault.org> Regards, Anup > --- > arch/riscv/kvm/vcpu_pmu.c | 11 ++--------- > arch/riscv/kvm/vcpu_sbi_sta.c | 9 ++------- > 2 files changed, 4 insertions(+), 16 deletions(-) > > diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c > index 15d71a7b75ba..f8514086bd6b 100644 > --- a/arch/riscv/kvm/vcpu_pmu.c > +++ b/arch/riscv/kvm/vcpu_pmu.c > @@ -409,8 +409,6 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s > int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data); > int sbiret = 0; > gpa_t saddr; > - unsigned long hva; > - bool writable; > > if (!kvpmu || flags) { > sbiret = SBI_ERR_INVALID_PARAM; > @@ -432,19 +430,14 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s > goto out; > } > > - hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable); > - if (kvm_is_error_hva(hva) || !writable) { > - sbiret = SBI_ERR_INVALID_ADDRESS; > - goto out; > - } > - > kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC); > if (!kvpmu->sdata) > return -ENOMEM; > > + /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */ > if (kvm_vcpu_write_guest(vcpu, saddr, kvpmu->sdata, snapshot_area_size)) { > kfree(kvpmu->sdata); > - sbiret = SBI_ERR_FAILURE; > + sbiret = SBI_ERR_INVALID_ADDRESS; > goto out; > } > > diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c > index cc6cb7c8f0e4..caaa28460ca4 100644 > --- a/arch/riscv/kvm/vcpu_sbi_sta.c > +++ 
b/arch/riscv/kvm/vcpu_sbi_sta.c > @@ -85,8 +85,6 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu) > unsigned long shmem_phys_hi = cp->a1; > u32 flags = cp->a2; > struct sbi_sta_struct zero_sta = {0}; > - unsigned long hva; > - bool writable; > gpa_t shmem; > int ret; > > @@ -111,13 +109,10 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu) > return SBI_ERR_INVALID_ADDRESS; > } > > - hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable); > - if (kvm_is_error_hva(hva) || !writable) > - return SBI_ERR_INVALID_ADDRESS; > - > + /* No need to check writable slot explicitly as kvm_vcpu_write_guest does it internally */ > ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta)); > if (ret) > - return SBI_ERR_FAILURE; > + return SBI_ERR_INVALID_ADDRESS; > > vcpu->arch.sta.shmem = shmem; > vcpu->arch.sta.last_steal = current->sched_info.run_delay; > > -- > 2.43.0 >
© 2016 - 2025 Red Hat, Inc.