From: Fangyu Yu <fangyu.yu@linux.alibaba.com>
Extend kvm_riscv_gstage_mode_detect() to record HGATP.MODE values in a
bitmask. Keep tracking the maximum supported G-stage page table level
for existing internal users.
Also provide lightweight helpers to retrieve the supported-mode bitmask
and validate a requested HGATP.MODE against it.
Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
Reviewed-by: Andrew Jones <andrew.jones@oss.qualcomm.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
---
arch/riscv/include/asm/kvm_gstage.h | 11 +++++++++++
arch/riscv/kvm/gstage.c | 15 ++++++++++++---
2 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_gstage.h b/arch/riscv/include/asm/kvm_gstage.h
index 70d9d483365e..bbf8f45c6563 100644
--- a/arch/riscv/include/asm/kvm_gstage.h
+++ b/arch/riscv/include/asm/kvm_gstage.h
@@ -31,6 +31,7 @@ struct kvm_gstage_mapping {
#endif
extern unsigned long kvm_riscv_gstage_max_pgd_levels;
+extern u32 kvm_riscv_gstage_supported_mode_mask;
#define kvm_riscv_gstage_pgd_xbits 2
#define kvm_riscv_gstage_pgd_size (1UL << (HGATP_PAGE_SHIFT + kvm_riscv_gstage_pgd_xbits))
@@ -102,4 +103,14 @@ static inline void kvm_riscv_gstage_init(struct kvm_gstage *gstage, struct kvm *
gstage->pgd_levels = kvm->arch.pgd_levels;
}
+static inline u32 kvm_riscv_get_hgatp_mode_mask(void)
+{
+ return kvm_riscv_gstage_supported_mode_mask;
+}
+
+static inline bool kvm_riscv_hgatp_mode_is_valid(unsigned long mode)
+{
+ return kvm_riscv_gstage_supported_mode_mask & BIT(mode);
+}
+
#endif
diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
index 7c4c34bc191b..9204e6427d2d 100644
--- a/arch/riscv/kvm/gstage.c
+++ b/arch/riscv/kvm/gstage.c
@@ -16,6 +16,8 @@ unsigned long kvm_riscv_gstage_max_pgd_levels __ro_after_init = 3;
#else
unsigned long kvm_riscv_gstage_max_pgd_levels __ro_after_init = 2;
#endif
+/* Bitmask of supported HGATP.MODE encodings (BIT(HGATP_MODE_*)). */
+u32 kvm_riscv_gstage_supported_mode_mask __ro_after_init;
#define gstage_pte_leaf(__ptep) \
(pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))
@@ -317,11 +319,17 @@ void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end
void __init kvm_riscv_gstage_mode_detect(void)
{
+ kvm_riscv_gstage_supported_mode_mask = 0;
+ kvm_riscv_gstage_max_pgd_levels = 0;
+
#ifdef CONFIG_64BIT
/* Try Sv57x4 G-stage mode */
csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
kvm_riscv_gstage_max_pgd_levels = 5;
+ kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV57X4) |
+ BIT(HGATP_MODE_SV48X4) |
+ BIT(HGATP_MODE_SV39X4);
goto done;
}
@@ -329,6 +337,8 @@ void __init kvm_riscv_gstage_mode_detect(void)
csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
kvm_riscv_gstage_max_pgd_levels = 4;
+ kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV48X4) |
+ BIT(HGATP_MODE_SV39X4);
goto done;
}
@@ -336,6 +346,7 @@ void __init kvm_riscv_gstage_mode_detect(void)
csr_write(CSR_HGATP, HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV39X4) {
kvm_riscv_gstage_max_pgd_levels = 3;
+ kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV39X4);
goto done;
}
#else /* CONFIG_32BIT */
@@ -343,13 +354,11 @@ void __init kvm_riscv_gstage_mode_detect(void)
csr_write(CSR_HGATP, HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV32X4) {
kvm_riscv_gstage_max_pgd_levels = 2;
+ kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV32X4);
goto done;
}
#endif
- /* KVM depends on !HGATP_MODE_OFF */
- kvm_riscv_gstage_max_pgd_levels = 0;
-
done:
csr_write(CSR_HGATP, 0);
kvm_riscv_local_hfence_gvma_all();
--
2.50.1
2026-04-02T21:23:02+08:00, <fangyu.yu@linux.alibaba.com>:
> From: Fangyu Yu <fangyu.yu@linux.alibaba.com>
>
> Extend kvm_riscv_gstage_mode_detect() to record HGATP.MODE values in a
> bitmask. Keep tracking the maximum supported G-stage page table level
> for existing internal users.
>
> Also provide lightweight helpers to retrieve the supported-mode bitmask
> and validate a requested HGATP.MODE against it.
>
> Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
> Reviewed-by: Andrew Jones <andrew.jones@oss.qualcomm.com>
> Reviewed-by: Guo Ren <guoren@kernel.org>
> ---
> diff --git a/arch/riscv/include/asm/kvm_gstage.h b/arch/riscv/include/asm/kvm_gstage.h
> @@ -102,4 +103,14 @@ static inline void kvm_riscv_gstage_init(struct kvm_gstage *gstage, struct kvm *
> +static inline bool kvm_riscv_hgatp_mode_is_valid(unsigned long mode)
> +{
> + return kvm_riscv_gstage_supported_mode_mask & BIT(mode);
Shifting by the bit width or more is undefined behavior in C.
On RV64 the hardware masks the shift amount, so BIT(mode) effectively
becomes 1UL << (mode & 0x3f); an out-of-range MODE value could wrap
around and falsely pass validation.
Thanks.
>> From: Fangyu Yu <fangyu.yu@linux.alibaba.com>
>>
>> Extend kvm_riscv_gstage_mode_detect() to record HGATP.MODE values in a
>> bitmask. Keep tracking the maximum supported G-stage page table level
>> for existing internal users.
>>
>> Also provide lightweight helpers to retrieve the supported-mode bitmask
>> and validate a requested HGATP.MODE against it.
>>
>> Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
>> Reviewed-by: Andrew Jones <andrew.jones@oss.qualcomm.com>
>> Reviewed-by: Guo Ren <guoren@kernel.org>
>> ---
>> diff --git a/arch/riscv/include/asm/kvm_gstage.h b/arch/riscv/include/asm/kvm_gstage.h
>> @@ -102,4 +103,14 @@ static inline void kvm_riscv_gstage_init(struct kvm_gstage *gstage, struct kvm *
>> +static inline bool kvm_riscv_hgatp_mode_is_valid(unsigned long mode)
>> +{
>> + return kvm_riscv_gstage_supported_mode_mask & BIT(mode);
>
>Shifting by the bit width or more is undefined behavior in C.
>On RV64 the hardware masks the shift amount, so BIT(mode) effectively
>becomes 1UL << (mode & 0x3f); an out-of-range MODE value could wrap
>around and falsely pass validation.
>
Thanks for catching this.
You’re right: BIT(mode) is undefined for out-of-range shifts, and on RV64 it can
effectively mask the shift amount, potentially making invalid MODE values appear
valid. In v8 I’ll add an explicit bounds check before shifting.
Thanks,
Fangyu
>Thanks.
On Thu, Apr 2, 2026 at 6:53 PM <fangyu.yu@linux.alibaba.com> wrote:
>
> From: Fangyu Yu <fangyu.yu@linux.alibaba.com>
>
> Extend kvm_riscv_gstage_mode_detect() to record HGATP.MODE values in a
> bitmask. Keep tracking the maximum supported G-stage page table level
> for existing internal users.
>
> Also provide lightweight helpers to retrieve the supported-mode bitmask
> and validate a requested HGATP.MODE against it.
>
> Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
> Reviewed-by: Andrew Jones <andrew.jones@oss.qualcomm.com>
> Reviewed-by: Guo Ren <guoren@kernel.org>
LGTM.
Reviewed-by: Anup Patel <anup@brainfault.org>
Thanks,
Anup
> ---
> arch/riscv/include/asm/kvm_gstage.h | 11 +++++++++++
> arch/riscv/kvm/gstage.c | 15 ++++++++++++---
> 2 files changed, 23 insertions(+), 3 deletions(-)
>
> diff --git a/arch/riscv/include/asm/kvm_gstage.h b/arch/riscv/include/asm/kvm_gstage.h
> index 70d9d483365e..bbf8f45c6563 100644
> --- a/arch/riscv/include/asm/kvm_gstage.h
> +++ b/arch/riscv/include/asm/kvm_gstage.h
> @@ -31,6 +31,7 @@ struct kvm_gstage_mapping {
> #endif
>
> extern unsigned long kvm_riscv_gstage_max_pgd_levels;
> +extern u32 kvm_riscv_gstage_supported_mode_mask;
>
> #define kvm_riscv_gstage_pgd_xbits 2
> #define kvm_riscv_gstage_pgd_size (1UL << (HGATP_PAGE_SHIFT + kvm_riscv_gstage_pgd_xbits))
> @@ -102,4 +103,14 @@ static inline void kvm_riscv_gstage_init(struct kvm_gstage *gstage, struct kvm *
> gstage->pgd_levels = kvm->arch.pgd_levels;
> }
>
> +static inline u32 kvm_riscv_get_hgatp_mode_mask(void)
> +{
> + return kvm_riscv_gstage_supported_mode_mask;
> +}
> +
> +static inline bool kvm_riscv_hgatp_mode_is_valid(unsigned long mode)
> +{
> + return kvm_riscv_gstage_supported_mode_mask & BIT(mode);
> +}
> +
> #endif
> diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
> index 7c4c34bc191b..9204e6427d2d 100644
> --- a/arch/riscv/kvm/gstage.c
> +++ b/arch/riscv/kvm/gstage.c
> @@ -16,6 +16,8 @@ unsigned long kvm_riscv_gstage_max_pgd_levels __ro_after_init = 3;
> #else
> unsigned long kvm_riscv_gstage_max_pgd_levels __ro_after_init = 2;
> #endif
> +/* Bitmask of supported HGATP.MODE encodings (BIT(HGATP_MODE_*)). */
> +u32 kvm_riscv_gstage_supported_mode_mask __ro_after_init;
>
> #define gstage_pte_leaf(__ptep) \
> (pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))
> @@ -317,11 +319,17 @@ void kvm_riscv_gstage_wp_range(struct kvm_gstage *gstage, gpa_t start, gpa_t end
>
> void __init kvm_riscv_gstage_mode_detect(void)
> {
> + kvm_riscv_gstage_supported_mode_mask = 0;
> + kvm_riscv_gstage_max_pgd_levels = 0;
> +
> #ifdef CONFIG_64BIT
> /* Try Sv57x4 G-stage mode */
> csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
> if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
> kvm_riscv_gstage_max_pgd_levels = 5;
> + kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV57X4) |
> + BIT(HGATP_MODE_SV48X4) |
> + BIT(HGATP_MODE_SV39X4);
> goto done;
> }
>
> @@ -329,6 +337,8 @@ void __init kvm_riscv_gstage_mode_detect(void)
> csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
> if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
> kvm_riscv_gstage_max_pgd_levels = 4;
> + kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV48X4) |
> + BIT(HGATP_MODE_SV39X4);
> goto done;
> }
>
> @@ -336,6 +346,7 @@ void __init kvm_riscv_gstage_mode_detect(void)
> csr_write(CSR_HGATP, HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
> if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV39X4) {
> kvm_riscv_gstage_max_pgd_levels = 3;
> + kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV39X4);
> goto done;
> }
> #else /* CONFIG_32BIT */
> @@ -343,13 +354,11 @@ void __init kvm_riscv_gstage_mode_detect(void)
> csr_write(CSR_HGATP, HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
> if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV32X4) {
> kvm_riscv_gstage_max_pgd_levels = 2;
> + kvm_riscv_gstage_supported_mode_mask |= BIT(HGATP_MODE_SV32X4);
> goto done;
> }
> #endif
>
> - /* KVM depends on !HGATP_MODE_OFF */
> - kvm_riscv_gstage_max_pgd_levels = 0;
> -
> done:
> csr_write(CSR_HGATP, 0);
> kvm_riscv_local_hfence_gvma_all();
> --
> 2.50.1
>
© 2016 - 2026 Red Hat, Inc.