In the function kvm_map_page_fast(), the local variable 'ret' is only
ever set to -EFAULT before jumping to the error path, so the variable is
redundant; remove it and return the constants directly.
Signed-off-by: tangbin <tangbin@cmss.chinamobile.com>
---
arch/loongarch/kvm/mmu.c | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 2634a9e8d..d6c922a4a 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -551,7 +551,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
*/
static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
{
- int ret = 0;
kvm_pfn_t pfn = 0;
kvm_pte_t *ptep, changed, new;
gfn_t gfn = gpa >> PAGE_SHIFT;
@@ -563,20 +562,16 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
/* Fast path - just check GPA page table for an existing entry */
ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
- if (!ptep || !kvm_pte_present(NULL, ptep)) {
- ret = -EFAULT;
+ if (!ptep || !kvm_pte_present(NULL, ptep))
goto out;
- }
/* Track access to pages marked old */
new = kvm_pte_mkyoung(*ptep);
/* call kvm_set_pfn_accessed() after unlock */
if (write && !kvm_pte_dirty(new)) {
- if (!kvm_pte_write(new)) {
- ret = -EFAULT;
+ if (!kvm_pte_write(new))
goto out;
- }
if (kvm_pte_huge(new)) {
/*
@@ -584,10 +579,8 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
* enabled for HugePages
*/
slot = gfn_to_memslot(kvm, gfn);
- if (kvm_slot_dirty_track_enabled(slot)) {
- ret = -EFAULT;
+ if (kvm_slot_dirty_track_enabled(slot))
goto out;
- }
}
/* Track dirtying of writeable pages */
@@ -615,10 +608,10 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
if (page)
put_page(page);
}
- return ret;
+ return 0;
out:
spin_unlock(&kvm->mmu_lock);
- return ret;
+ return -EFAULT;
}
static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
--
2.18.4
Hi Tangbin,
The function kvm_map_page_fast() only ever returns -EFAULT or 0. If new
code later needed a better or different error code for each failure
path, the situation would be different :)
I would like to keep the existing code unchanged; however, thanks for your patch.
Regards
Bibo Mao
On 2024/7/13 at 11:59 PM, tangbin wrote:
> In the function kvm_map_page_fast, the assignment of 'ret' is
> redundant, so remove it.
>
> Signed-off-by: tangbin <tangbin@cmss.chinamobile.com>
> ---
> arch/loongarch/kvm/mmu.c | 17 +++++------------
> 1 file changed, 5 insertions(+), 12 deletions(-)
>
> diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
> index 2634a9e8d..d6c922a4a 100644
> --- a/arch/loongarch/kvm/mmu.c
> +++ b/arch/loongarch/kvm/mmu.c
> @@ -551,7 +551,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
> */
> static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
> {
> - int ret = 0;
> kvm_pfn_t pfn = 0;
> kvm_pte_t *ptep, changed, new;
> gfn_t gfn = gpa >> PAGE_SHIFT;
> @@ -563,20 +562,16 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
>
> /* Fast path - just check GPA page table for an existing entry */
> ptep = kvm_populate_gpa(kvm, NULL, gpa, 0);
> - if (!ptep || !kvm_pte_present(NULL, ptep)) {
> - ret = -EFAULT;
> + if (!ptep || !kvm_pte_present(NULL, ptep))
> goto out;
> - }
>
> /* Track access to pages marked old */
> new = kvm_pte_mkyoung(*ptep);
> /* call kvm_set_pfn_accessed() after unlock */
>
> if (write && !kvm_pte_dirty(new)) {
> - if (!kvm_pte_write(new)) {
> - ret = -EFAULT;
> + if (!kvm_pte_write(new))
> goto out;
> - }
>
> if (kvm_pte_huge(new)) {
> /*
> @@ -584,10 +579,8 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
> * enabled for HugePages
> */
> slot = gfn_to_memslot(kvm, gfn);
> - if (kvm_slot_dirty_track_enabled(slot)) {
> - ret = -EFAULT;
> + if (kvm_slot_dirty_track_enabled(slot))
> goto out;
> - }
> }
>
> /* Track dirtying of writeable pages */
> @@ -615,10 +608,10 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
> if (page)
> put_page(page);
> }
> - return ret;
> + return 0;
> out:
> spin_unlock(&kvm->mmu_lock);
> - return ret;
> + return -EFAULT;
> }
>
> static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
>
© 2016 - 2026 Red Hat, Inc.