Use use_alternative_unlikely() to check for RISCV_ISA_EXT_SVVPTC,
replacing the use of asm goto with ALTERNATIVE.

The "unlikely" variant is used to match the behavior of the original
implementation using ALTERNATIVE("nop", "j %l[svvptc]", ...).

Signed-off-by: Vivian Wang <wangruikang@iscas.ac.cn>
---
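(Note for reviewers, not part of the commit message: use_alternative_unlikely()
is taken to be the helper introduced earlier in this series. For readers
without that patch in hand, the sketch below shows the shape this conversion
assumes. It is modeled on the existing riscv_has_extension_unlikely() in
arch/riscv/include/asm/cpufeature.h; the vendor_id/patch_id parameter names
are illustrative, and the real helper presumably also has an
IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) fallback the way
riscv_has_extension_unlikely() does.)

static __always_inline bool
use_alternative_unlikely(const unsigned long vendor_id,
			 const unsigned long patch_id)
{
	/*
	 * Emit a single nop that alternatives patching rewrites into a
	 * taken branch when (vendor_id, patch_id) applies, so the
	 * no-feature path falls straight through without a branch.
	 */
	asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[patch], 1)
		 : : [vendor] "i" (vendor_id), [patch] "i" (patch_id)
		 : : l_yes);

	return false;
l_yes:
	return true;
}

With that shape, use_alternative_unlikely(0, RISCV_ISA_EXT_SVVPTC) compiles
to the same patched nop/branch as the open-coded asm goto being removed,
which is why the "unlikely" variant preserves the original fast path.
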
 arch/riscv/include/asm/pgtable.h | 15 +++++++--------
 arch/riscv/mm/pgtable.c          | 22 ++++++++++------------
 2 files changed, 17 insertions(+), 20 deletions(-)

diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 91697fbf1f9013005800f713797e4b6b1fc8d312..81eb386da837f064c7372530e2f2227575a703d3 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -495,8 +495,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 		struct vm_area_struct *vma, unsigned long address,
 		pte_t *ptep, unsigned int nr)
 {
-	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
-		 : : : : svvptc);
+	/*
+	 * Svvptc guarantees that the new valid pte will be visible within
+	 * a bounded timeframe, so when the uarch does not cache invalid
+	 * entries, we don't have to do anything.
+	 */
+	if (use_alternative_unlikely(0, RISCV_ISA_EXT_SVVPTC))
+		return;
 
 	/*
 	 * The kernel assumes that TLBs don't cache invalid entries, but
@@ -508,12 +513,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
 	while (nr--)
 		local_flush_tlb_page(address + nr * PAGE_SIZE);
 
-svvptc:;
-	/*
-	 * Svvptc guarantees that the new valid pte will be visible within
-	 * a bounded timeframe, so when the uarch does not cache invalid
-	 * entries, we don't have to do anything.
-	 */
 }
 #define update_mmu_cache(vma, addr, ptep) \
 	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index 8b6c0a112a8db4e91de54c3bd3bd527a605a6197..e0c414fa0d433fdc39c80ec390c467ca59a9a334 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -9,8 +9,16 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pte_t *ptep,
 			  pte_t entry, int dirty)
 {
-	asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
-		 : : : : svvptc);
+	if (use_alternative_unlikely(0, RISCV_ISA_EXT_SVVPTC)) {
+		if (!pte_same(ptep_get(ptep), entry)) {
+			__set_pte_at(vma->vm_mm, ptep, entry);
+			/* Here only not svadu is impacted */
+			flush_tlb_page(vma, address);
+			return true;
+		}
+
+		return false;
+	}
 
 	if (!pte_same(ptep_get(ptep), entry))
 		__set_pte_at(vma->vm_mm, ptep, entry);
@@ -19,16 +27,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	 * the case that the PTE changed and the spurious fault case.
 	 */
 	return true;
-
-svvptc:
-	if (!pte_same(ptep_get(ptep), entry)) {
-		__set_pte_at(vma->vm_mm, ptep, entry);
-		/* Here only not svadu is impacted */
-		flush_tlb_page(vma, address);
-		return true;
-	}
-
-	return false;
 }
 
 int ptep_test_and_clear_young(struct vm_area_struct *vma,
--
2.50.1