Currently, when the map path of the stage-2 page-table coalesces a
set of pages into a hugepage, KVM invalidates the entire
VM's TLB entries. This incurs a performance penalty, as TLB
entries for pages that were coalesced earlier are invalidated as
well and have to be refilled unnecessarily.
Hence, if the system supports it, use __kvm_tlb_flush_range_vmid_ipa()
to flush only the range of pages that have been combined into
a hugepage, while leaving other TLB entries alone.
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
arch/arm64/kvm/hyp/pgtable.c | 29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index b11cf2c618a6c..099032bb01bce 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -686,6 +686,22 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
}
+static void kvm_table_pte_flush(struct kvm_s2_mmu *mmu, u64 addr, u32 level, u32 tlb_level)
+{
+ if (system_supports_tlb_range()) {
+ u64 end = addr + kvm_granule_size(level);
+
+ kvm_call_hyp(__kvm_tlb_flush_range_vmid_ipa, mmu, addr, end, tlb_level);
+ } else {
+ /*
+ * Invalidate the whole stage-2, as we may have numerous leaf
+ * entries below us which would otherwise need invalidating
+ * individually.
+ */
+ kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ }
+}
+
/**
* stage2_try_break_pte() - Invalidates a pte according to the
* 'break-before-make' requirements of the
@@ -693,6 +709,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
*
* @ctx: context of the visited pte.
* @mmu: stage-2 mmu
+ * @tlb_level: The level at which the leaf pages are expected (for FEAT_TTL hint)
*
* Returns: true if the pte was successfully broken.
*
@@ -701,7 +718,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
* on the containing table page.
*/
static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
- struct kvm_s2_mmu *mmu)
+ struct kvm_s2_mmu *mmu, u32 tlb_level)
{
struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
@@ -722,7 +739,7 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
* value (if any).
*/
if (kvm_pte_table(ctx->old, ctx->level))
- kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ kvm_table_pte_flush(mmu, ctx->addr, ctx->level, tlb_level);
else if (kvm_pte_valid(ctx->old))
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
@@ -804,7 +821,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
if (!stage2_pte_needs_update(ctx->old, new))
return -EAGAIN;
- if (!stage2_try_break_pte(ctx, data->mmu))
+ if (!stage2_try_break_pte(ctx, data->mmu, ctx->level))
return -EAGAIN;
/* Perform CMOs before installation of the guest stage-2 PTE */
@@ -861,7 +878,11 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
if (!childp)
return -ENOMEM;
- if (!stage2_try_break_pte(ctx, data->mmu)) {
+ /*
+ * As the table will be replaced with a block, one level down would
+ * be the current page entries held by the table.
+ */
+ if (!stage2_try_break_pte(ctx, data->mmu, ctx->level + 1)) {
mm_ops->put_page(childp);
return -EAGAIN;
}
--
2.39.0.314.g84b9a713c41-goog
Hi Raghavendra,
My comment from the previous change also applies here:
KVM: arm64: Use range-based TLBIs when collapsing hugepages
On Mon, Jan 09, 2023 at 09:53:46PM +0000, Raghavendra Rao Ananta wrote:
> Currently, when the map path of the stage-2 page-table coalesces a
> set of pages into a hugepage, KVM invalidates the entire
> VM's TLB entries. This incurs a performance penalty, as TLB
> entries for pages that were coalesced earlier are invalidated as
> well and have to be refilled unnecessarily.
It is also problematic that we do this on every single fault where we
collapse a hugepage.
> Hence, if the system supports it, use __kvm_tlb_flush_range_vmid_ipa()
> to flush only the range of pages that have been combined into
> a hugepage, while leaving other TLB entries alone.
>
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> ---
> arch/arm64/kvm/hyp/pgtable.c | 29 +++++++++++++++++++++++++----
> 1 file changed, 25 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index b11cf2c618a6c..099032bb01bce 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -686,6 +686,22 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
> return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
> }
>
> +static void kvm_table_pte_flush(struct kvm_s2_mmu *mmu, u64 addr, u32 level, u32 tlb_level)
Could you call this something like kvm_pgtable_flush_range() and take an
address range as an argument? TLBIRANGE can be used outside the context
of a table (i.e. a subset of PTEs).
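Something along these lines, perhaps (just a sketch of the shape I
have in mind, reusing the hypercalls from this patch; the name and
signature are only a suggestion):

	static void kvm_pgtable_flush_range(struct kvm_s2_mmu *mmu,
					    u64 start, u64 end, u32 tlb_level)
	{
		if (system_supports_tlb_range()) {
			kvm_call_hyp(__kvm_tlb_flush_range_vmid_ipa, mmu,
				     start, end, tlb_level);
		} else {
			/*
			 * No FEAT_TLBIRANGE; fall back to invalidating
			 * the whole stage-2, as before.
			 */
			kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
		}
	}

Callers that currently pass (addr, level) would then compute
end = addr + kvm_granule_size(level) themselves.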
> +{
> + if (system_supports_tlb_range()) {
> + u64 end = addr + kvm_granule_size(level);
> +
> + kvm_call_hyp(__kvm_tlb_flush_range_vmid_ipa, mmu, addr, end, tlb_level);
> + } else {
> + /*
> + * Invalidate the whole stage-2, as we may have numerous leaf
> + * entries below us which would otherwise need invalidating
> + * individually.
> + */
> + kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
> + }
> +}
> +
> /**
> * stage2_try_break_pte() - Invalidates a pte according to the
> * 'break-before-make' requirements of the
> @@ -693,6 +709,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
> *
> * @ctx: context of the visited pte.
> * @mmu: stage-2 mmu
> + * @tlb_level: The level at which the leaf pages are expected (for FEAT_TTL hint)
Do we need the caller to provide the TTL hint? We already have
ctx->level, and stage2_try_break_pte() also knows what the removed PTE
contained (i.e. a table or a block/page).
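i.e., something like the below inside stage2_try_break_pte() (again,
just a sketch, and it assumes we give up on a TTL hint for removed
tables; see my next comment):

	if (kvm_pte_table(ctx->old, ctx->level))
		/* Leaves below a removed table have no uniform level. */
		kvm_table_pte_flush(mmu, ctx->addr, ctx->level, 0);
	else if (kvm_pte_valid(ctx->old))
		/* Block/page mapping: ctx->level is the exact level. */
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
			     ctx->level);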
> * Returns: true if the pte was successfully broken.
> *
> @@ -701,7 +718,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
> * on the containing table page.
> */
> static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
> - struct kvm_s2_mmu *mmu)
> + struct kvm_s2_mmu *mmu, u32 tlb_level)
> {
> struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
>
> @@ -722,7 +739,7 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
> * value (if any).
> */
> if (kvm_pte_table(ctx->old, ctx->level))
> - kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
> + kvm_table_pte_flush(mmu, ctx->addr, ctx->level, tlb_level);
I don't think we should provide a TTL hint for a removed table. It is
entirely possible for the unlinked table to contain a mix of blocks and
pages, meaning there isn't a uniform table level for the whole range.
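For example, with a 4K granule the removed table could look like:

	level-1 table entry being removed
	  |- level-2 entry: 2M block           -> leaf at level 2
	  `- level-2 entry: table of 4K pages  -> leaves at level 3

No single TTL value describes both subtrees, so 0 (i.e. no hint) is
the only safe choice here.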
> else if (kvm_pte_valid(ctx->old))
> kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
>
> @@ -804,7 +821,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
> if (!stage2_pte_needs_update(ctx->old, new))
> return -EAGAIN;
>
> - if (!stage2_try_break_pte(ctx, data->mmu))
> + if (!stage2_try_break_pte(ctx, data->mmu, ctx->level))
> return -EAGAIN;
>
> /* Perform CMOs before installation of the guest stage-2 PTE */
> @@ -861,7 +878,11 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
> if (!childp)
> return -ENOMEM;
>
> - if (!stage2_try_break_pte(ctx, data->mmu)) {
> + /*
> + * As the table will be replaced with a block, one level down would
> + * be the current page entries held by the table.
> + */
This isn't necessarily true. Ignoring mixed blocks/pages for a moment,
collapsing a PUD entry into a block after dirty logging (where we mapped
at PTE level) would imply a TTL of ctx->level + 2.
But again, I think it is best to provide no hint in this case.
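To spell that example out with a 4K granule:

  - dirty logging forces mappings down to PTE level (level 3);
  - after migration, the whole PUD range collapses into a 1G block,
    so ctx->level == 1;
  - the stale leaves are therefore at level 3 == ctx->level + 2.

Since the walker can't tell which case it is in, 0 (no hint) is the
safe value.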
--
Thanks,
Oliver
Hi Oliver,
On Tue, Jan 24, 2023 at 3:25 PM Oliver Upton <oliver.upton@linux.dev> wrote:
>
> Hi Raghavendra,
>
> My comment from the previous change also applies here:
>
> KVM: arm64: Use range-based TLBIs when collapsing hugepages
>
> On Mon, Jan 09, 2023 at 09:53:46PM +0000, Raghavendra Rao Ananta wrote:
> > Currently, when the map path of the stage-2 page-table coalesces a
> > set of pages into a hugepage, KVM invalidates the entire
> > VM's TLB entries. This incurs a performance penalty, as TLB
> > entries for pages that were coalesced earlier are invalidated as
> > well and have to be refilled unnecessarily.
>
> It is also problematic that we do this on every single fault where we
> collapse a hugepage.
>
Yes! I'll also include this description in v2.
> > Hence, if the system supports it, use __kvm_tlb_flush_range_vmid_ipa()
> > to flush only the range of pages that have been combined into
> > a hugepage, while leaving other TLB entries alone.
> >
> > Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> > ---
> > arch/arm64/kvm/hyp/pgtable.c | 29 +++++++++++++++++++++++++----
> > 1 file changed, 25 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> > index b11cf2c618a6c..099032bb01bce 100644
> > --- a/arch/arm64/kvm/hyp/pgtable.c
> > +++ b/arch/arm64/kvm/hyp/pgtable.c
> > @@ -686,6 +686,22 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
> > return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
> > }
> >
> > +static void kvm_table_pte_flush(struct kvm_s2_mmu *mmu, u64 addr, u32 level, u32 tlb_level)
>
> Could you call this something like kvm_pgtable_flush_range() and take an
> address range as an argument? TLBIRANGE can be used outside the context
> of a table (i.e. a subset of PTEs).
>
Good idea. In that case, the function becomes very close to arm64's
implementation of kvm_flush_remote_tlbs_range() on top of David's
series [1]. Unfortunately, we may not be able to invoke that directly.
I'll rename the function.
> > +{
> > + if (system_supports_tlb_range()) {
> > + u64 end = addr + kvm_granule_size(level);
> > +
> > + kvm_call_hyp(__kvm_tlb_flush_range_vmid_ipa, mmu, addr, end, tlb_level);
> > + } else {
> > + /*
> > + * Invalidate the whole stage-2, as we may have numerous leaf
> > + * entries below us which would otherwise need invalidating
> > + * individually.
> > + */
> > + kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
> > + }
> > +}
> > +
> > /**
> > * stage2_try_break_pte() - Invalidates a pte according to the
> > * 'break-before-make' requirements of the
> > @@ -693,6 +709,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
> > *
> > * @ctx: context of the visited pte.
> > * @mmu: stage-2 mmu
> > + * @tlb_level: The level at which the leaf pages are expected (for FEAT_TTL hint)
>
> Do we need the caller to provide the TTL hint? We already have
> ctx->level, and stage2_try_break_pte() also knows what the removed PTE
> contained (i.e. a table or a block/page).
>
ctx->level may not always translate into the TTL level hint. For
example, patch 6/6 of this series also calls stage2_try_break_pte(),
but from the very top level (level 1). In that case, the level can
still be extracted from ctx, but we won't have any idea what the TTL
hint should be, since we won't be traversing the rest of the
page-table. As a result, we pass 0. However, if we are replacing a
table full of page mappings with a block, the TTL hint could simply
be one level down.
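In other words, with the v1 signature the two call patterns would
look like (sketch):

	/* map path: a table of page mappings collapses into a block */
	stage2_try_break_pte(ctx, data->mmu, ctx->level + 1);

	/* patch 6/6: breaking the PTE from the root, leaf level unknown */
	stage2_try_break_pte(ctx, data->mmu, 0);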
> > * Returns: true if the pte was successfully broken.
> > *
> > @@ -701,7 +718,7 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
> > * on the containing table page.
> > */
> > static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
> > - struct kvm_s2_mmu *mmu)
> > + struct kvm_s2_mmu *mmu, u32 tlb_level)
> > {
> > struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
> >
> > @@ -722,7 +739,7 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
> > * value (if any).
> > */
> > if (kvm_pte_table(ctx->old, ctx->level))
> > - kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
> > + kvm_table_pte_flush(mmu, ctx->addr, ctx->level, tlb_level);
>
> I don't think we should provide a TTL hint for a removed table. It is
> entirely possible for the unlinked table to contain a mix of blocks and
> pages, meaning there isn't a uniform table level for the whole range.
>
True, but it's particularly useful for the case where we know that a
table is going to be replaced by a block. In that case, can't we be
sure of the TTL level? Perhaps for other cases, we can supply 0. WDYT?
> > else if (kvm_pte_valid(ctx->old))
> > kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
> >
> > @@ -804,7 +821,7 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
> > if (!stage2_pte_needs_update(ctx->old, new))
> > return -EAGAIN;
> >
> > - if (!stage2_try_break_pte(ctx, data->mmu))
> > + if (!stage2_try_break_pte(ctx, data->mmu, ctx->level))
> > return -EAGAIN;
> >
> > /* Perform CMOs before installation of the guest stage-2 PTE */
> > @@ -861,7 +878,11 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
> > if (!childp)
> > return -ENOMEM;
> >
> > - if (!stage2_try_break_pte(ctx, data->mmu)) {
> > + /*
> > + * As the table will be replaced with a block, one level down would
> > + * be the current page entries held by the table.
> > + */
>
> This isn't necessarily true. Ignoring mixed blocks/pages for a moment,
> collapsing a PUD entry into a block after dirty logging (where we mapped
> at PTE level) would imply a TTL of ctx->level + 2.
>
> But again, I think it is best to provide no hint in this case.
>
Ah, right. We could also collapse into a 1G block. No hint would be
better in that case.
Thanks,
Raghavendra
[1]: https://lore.kernel.org/linux-arm-kernel/20230119173559.2517103-1-dmatlack@google.com/T/#meca775fa8accc996900d5e9caeb004f7b9774627
> --
> Thanks,
> Oliver