arch/arm64/kvm/mmu.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
FEAT_HDBSS speeds up guest memory dirty tracking by avoiding a page fault
and saving the entry in a tracking structure.
That may be a problem when guest memory is backed by hugepages or
transparent huge pages, since HDBSS makes on-demand hugepage splitting
impossible, leaving eager hugepage splitting as the only option.
So, at stage2 initialization, enable eager hugepage splitting with
chunk = PAGE_SIZE if the system supports HDBSS.
Signed-off-by: Leonardo Bras <leo.bras@arm.com>
---
arch/arm64/kvm/mmu.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 070a01e53fcb..bdfa72b7c073 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -993,22 +993,26 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
if (!mmu->last_vcpu_ran) {
err = -ENOMEM;
goto out_destroy_pgtable;
}
for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
- /* The eager page splitting is disabled by default */
- mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+ /* The eager page splitting is disabled by default if system has no HDBSS */
+ if (system_supports_hacdbs())
+ mmu->split_page_chunk_size = PAGE_SIZE;
+ else
+ mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+
mmu->split_page_cache.gfp_zero = __GFP_ZERO;
mmu->pgd_phys = __pa(pgt->pgd);
if (kvm_is_nested_s2_mmu(kvm, mmu))
kvm_init_nested_s2_mmu(mmu);
return 0;
out_destroy_pgtable:
--
2.53.0
On 3/26/2026 2:20 AM, Leonardo Bras wrote:
> FEAT_HDBSS speeds up guest memory dirty tracking by avoiding a page fault
> and saving the entry in a tracking structure.
>
> That may be a problem when we have guest memory backed by hugepages or
> transparent huge pages, as it's not possible to do on-demand hugepage
> splitting, relying only on eager hugepage splitting.
>
> So, at stage2 initialization, enable eager hugepage splitting with
> chunk = PAGE_SIZE if the system supports HDBSS.
>
> Signed-off-by: Leonardo Bras <leo.bras@arm.com>
> ---
> arch/arm64/kvm/mmu.c | 8 ++++++--
> 1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 070a01e53fcb..bdfa72b7c073 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -993,22 +993,26 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
>
> mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
> if (!mmu->last_vcpu_ran) {
> err = -ENOMEM;
> goto out_destroy_pgtable;
> }
>
> for_each_possible_cpu(cpu)
> *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
>
> - /* The eager page splitting is disabled by default */
> - mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
> + /* The eager page splitting is disabled by default if system has no HDBSS */
> + if (system_supports_hacdbs())
> + mmu->split_page_chunk_size = PAGE_SIZE;
> + else
> + mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
> +
> mmu->split_page_cache.gfp_zero = __GFP_ZERO;
>
> mmu->pgd_phys = __pa(pgt->pgd);
>
> if (kvm_is_nested_s2_mmu(kvm, mmu))
> kvm_init_nested_s2_mmu(mmu);
>
> return 0;
>
> out_destroy_pgtable:
Thanks again for sending this patch. I'll integrate it into the next
version and run some tests.
On Fri, Mar 27, 2026 at 03:40:30PM +0800, Tian Zheng wrote:
>
> On 3/26/2026 2:20 AM, Leonardo Bras wrote:
> > FEAT_HDBSS speeds up guest memory dirty tracking by avoiding a page fault
> > and saving the entry in a tracking structure.
> >
> > That may be a problem when we have guest memory backed by hugepages or
> > transparent huge pages, as it's not possible to do on-demand hugepage
> > splitting, relying only on eager hugepage splitting.
> >
> > So, at stage2 initialization, enable eager hugepage splitting with
> > chunk = PAGE_SIZE if the system supports HDBSS.
> >
> > Signed-off-by: Leonardo Bras <leo.bras@arm.com>
> > ---
> > arch/arm64/kvm/mmu.c | 8 ++++++--
> > 1 file changed, 6 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index 070a01e53fcb..bdfa72b7c073 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -993,22 +993,26 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
> > mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
> > if (!mmu->last_vcpu_ran) {
> > err = -ENOMEM;
> > goto out_destroy_pgtable;
> > }
> > for_each_possible_cpu(cpu)
> > *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
> > - /* The eager page splitting is disabled by default */
> > - mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
> > + /* The eager page splitting is disabled by default if system has no HDBSS */
> > + if (system_supports_hacdbs())
> > + mmu->split_page_chunk_size = PAGE_SIZE;
> > + else
> > + mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
> > +
> > mmu->split_page_cache.gfp_zero = __GFP_ZERO;
> > mmu->pgd_phys = __pa(pgt->pgd);
> > if (kvm_is_nested_s2_mmu(kvm, mmu))
> > kvm_init_nested_s2_mmu(mmu);
> > return 0;
> > out_destroy_pgtable:
>
>
> Thanks again for sending this patch. I'll integrate it into the next version
> and run some tests.
>
>
Awesome, thanks!
Leo
© 2016 - 2026 Red Hat, Inc.