kvm_handle_hva_range is only used by the young notifiers. In a later
patch, it will be even further tied to the young notifiers. Instead of
renaming kvm_handle_hva_range to something like
kvm_handle_hva_range_young, simply remove kvm_handle_hva_range. This
seems slightly more readable, though there is slightly more code
duplication.
Finally, rename __kvm_handle_hva_range to kvm_handle_hva_range, now that
the name is available.
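In short, the end state looks like this (signatures abbreviated from the
diff below):

	/* Before: a wrapper built the kvm_mmu_notifier_range for callers. */
	static kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
			const struct kvm_mmu_notifier_range *range);
	static int kvm_handle_hva_range(struct mmu_notifier *mn,
			unsigned long start, unsigned long end,
			gfn_handler_t handler, bool flush_on_ret);

	/* After: each young notifier builds its own kvm_mmu_notifier_range. */
	static kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
			const struct kvm_mmu_notifier_range *range);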
Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: James Houghton <jthoughton@google.com>
---
virt/kvm/kvm_main.c | 74 +++++++++++++++++++++++----------------------
1 file changed, 38 insertions(+), 36 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 27186b06518a..8b234a9acdb3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -551,8 +551,8 @@ static void kvm_null_fn(void)
node; \
node = interval_tree_iter_next(node, start, last)) \
-static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
- const struct kvm_mmu_notifier_range *range)
+static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
+ const struct kvm_mmu_notifier_range *range)
{
struct kvm_mmu_notifier_return r = {
.ret = false,
@@ -628,33 +628,6 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
return r;
}
-static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
- unsigned long start,
- unsigned long end,
- gfn_handler_t handler,
- bool flush_on_ret)
-{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const struct kvm_mmu_notifier_range range = {
- .start = start,
- .end = end,
- .handler = handler,
- .on_lock = (void *)kvm_null_fn,
- .flush_on_ret = flush_on_ret,
- .may_block = false,
- };
-
- return __kvm_handle_hva_range(kvm, &range).ret;
-}
-
-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
- unsigned long start,
- unsigned long end,
- gfn_handler_t handler)
-{
- return kvm_handle_hva_range(mn, start, end, handler, false);
-}
-
void kvm_mmu_invalidate_begin(struct kvm *kvm)
{
lockdep_assert_held_write(&kvm->mmu_lock);
@@ -747,7 +720,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* that guest memory has been reclaimed. This needs to be done *after*
* dropping mmu_lock, as x86's reclaim path is slooooow.
*/
- if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+ if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
kvm_arch_guest_memory_reclaimed(kvm);
return 0;
@@ -793,7 +766,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
};
bool wake;
- __kvm_handle_hva_range(kvm, &hva_range);
+ kvm_handle_hva_range(kvm, &hva_range);
/* Pairs with the increment in range_start(). */
spin_lock(&kvm->mn_invalidate_lock);
@@ -815,10 +788,20 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ const struct kvm_mmu_notifier_range range = {
+ .start = start,
+ .end = end,
+ .handler = kvm_age_gfn,
+ .on_lock = (void *)kvm_null_fn,
+ .flush_on_ret =
+ !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG),
+ .may_block = false,
+ };
+
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, kvm_age_gfn,
- !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
+ return kvm_handle_hva_range(kvm, &range).ret;
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -826,6 +809,16 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ const struct kvm_mmu_notifier_range range = {
+ .start = start,
+ .end = end,
+ .handler = kvm_age_gfn,
+ .on_lock = (void *)kvm_null_fn,
+ .flush_on_ret = false,
+ .may_block = false,
+ };
+
trace_kvm_age_hva(start, end);
/*
@@ -841,17 +834,26 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
* cadence. If we find this inaccurate, we might come up with a
* more sophisticated heuristic later.
*/
- return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+ return kvm_handle_hva_range(kvm, &range).ret;
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ const struct kvm_mmu_notifier_range range = {
+ .start = address,
+ .end = address + 1,
+ .handler = kvm_test_age_gfn,
+ .on_lock = (void *)kvm_null_fn,
+ .flush_on_ret = false,
+ .may_block = false,
+ };
+
trace_kvm_test_age_hva(address);
- return kvm_handle_hva_range_no_flush(mn, address, address + 1,
- kvm_test_age_gfn);
+ return kvm_handle_hva_range(kvm, &range).ret;
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
--
2.47.0.199.ga7371fff76-goog
On Tue, Nov 05, 2024, James Houghton wrote:
> kvm_handle_hva_range is only used by the young notifiers. In a later
> patch, it will be even further tied to the young notifiers. Instead of
> renaming kvm_handle_hva_range to something like
When referencing functions, include parentheses so it's super obvious that the
symbol is a function(), e.g. kvm_handle_hva_range(), kvm_handle_hva_range_young(),
etc.
> kvm_handle_hva_range_young, simply remove kvm_handle_hva_range. This
> seems slightly more readable,
I disagree, quite strongly in fact. The amount of duplication makes it harder
to see the differences between the three aging flows, and the fewer instances of
this pattern:
return kvm_handle_hva_range(kvm, &range).ret;
the better. I added the tuple return as a way to avoid an out-param (which I
still think is a good tradeoff), but there's definitely a cost to it.
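For reference, the tuple in question looks roughly like this (a sketch
pieced together from the fields used in this series, not necessarily the
verbatim definition in kvm_main.c):

	typedef struct kvm_mmu_notifier_return {
		bool ret;		/* handler result, returned to the mmu_notifier core */
		bool found_memslot;	/* did any memslot overlap the HVA range? */
	} kvm_mn_ret_t;

The alternative would be to return the int directly and report
found_memslot through a bool out-param.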
> though there is slightly more code duplication.
Heh, you have a different definition of "slightly". The total lines of code may
be close to a wash, but at the end of the series there are ~10 lines of code that
are nearly identical in three different places.
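The ~10 lines in question are the range initializer that each young
notifier now open-codes, e.g. (copied from the patch above):

	const struct kvm_mmu_notifier_range range = {
		.start = start,
		.end = end,
		.handler = kvm_age_gfn,
		.on_lock = (void *)kvm_null_fn,
		.flush_on_ret = false,
		.may_block = false,
	};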
My vote is for this:
---
virt/kvm/kvm_main.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de2c11dae231..bf4670e9fcc6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -551,8 +551,8 @@ static void kvm_null_fn(void)
node; \
node = interval_tree_iter_next(node, start, last)) \
-static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
- const struct kvm_mmu_notifier_range *range)
+static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
+ const struct kvm_mmu_notifier_range *range)
{
struct kvm_mmu_notifier_return r = {
.ret = false,
@@ -628,7 +628,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
return r;
}
-static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
gfn_handler_t handler,
@@ -647,10 +647,10 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
return __kvm_handle_hva_range(kvm, &range).ret;
}
-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
- unsigned long start,
- unsigned long end,
- gfn_handler_t handler)
+static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end,
+ gfn_handler_t handler)
{
return kvm_handle_hva_range(mn, start, end, handler, false);
}
@@ -747,7 +747,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* that guest memory has been reclaimed. This needs to be done *after*
* dropping mmu_lock, as x86's reclaim path is slooooow.
*/
- if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+ if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
kvm_arch_guest_memory_reclaimed(kvm);
return 0;
@@ -793,7 +793,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
};
bool wake;
- __kvm_handle_hva_range(kvm, &hva_range);
+ kvm_handle_hva_range(kvm, &hva_range);
/* Pairs with the increment in range_start(). */
spin_lock(&kvm->mn_invalidate_lock);
@@ -817,8 +817,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
{
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, kvm_age_gfn,
- !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
+ return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
+ !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -841,7 +841,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
* cadence. If we find this inaccurate, we might come up with a
* more sophisticated heuristic later.
*/
- return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+ return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
@@ -850,8 +850,7 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
{
trace_kvm_test_age_hva(address);
- return kvm_handle_hva_range_no_flush(mn, address, address + 1,
- kvm_test_age_gfn);
+ return kvm_age_hva_range_no_flush(mn, address, address + 1, kvm_test_age_gfn);
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
base-commit: 2d5faa6a8402435d6332e8e8f3c3f18cca382d83
--
On Fri, Jan 10, 2025 at 2:15 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Tue, Nov 05, 2024, James Houghton wrote:
> > kvm_handle_hva_range is only used by the young notifiers. In a later
> > patch, it will be even further tied to the young notifiers. Instead of
> > renaming kvm_handle_hva_range to something like
>
> When referencing functions, include parentheses so it's super obvious that the
> symbol is a function(), e.g. kvm_handle_hva_range(), kvm_handle_hva_range_young(),
> etc.
Thanks Sean, I think I've fixed up all the cases now.
>
> > kvm_handle_hva_range_young, simply remove kvm_handle_hva_range. This
> > seems slightly more readable,
>
> I disagree, quite strongly in fact. The amount of duplication makes it harder
> to see the differences between the three aging flows, and the fewer instances of
> this pattern:
>
> return kvm_handle_hva_range(kvm, &range).ret;
>
> the better. I added the tuple return as a way to avoid an out-param (which I
> still think is a good tradeoff), but there's definitely a cost to it.
>
> > though there is slightly more code duplication.
>
> Heh, you have a different definition of "slightly". The total lines of code may
> be close to a wash, but at the end of the series there are ~10 lines of code that
> are nearly identical in three different places.
>
> My vote is for this:
I applied this patch verbatim as a replacement for the original one.
Since [1], the "refactor" in this original patch makes much less sense. Thanks!
[1]: commit 28f8b61a69b5c ("KVM: Allow arch code to elide TLB flushes
when aging a young page")
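For reference, the elision from [1] comes down to the flush_on_ret check
in kvm_handle_hva_range(), roughly (sketch, simplified):

	/* After walking every memslot that overlaps the HVA range: */
	if (range->flush_on_ret && r.ret)
		kvm_flush_remote_tlbs(kvm);

With CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG=y, clear_flush_young passes
flush_on_ret=false, so finding a young page no longer forces a remote
TLB flush.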
> [ Sean's patch from above snipped; it was applied verbatim. ]