When !start_pte is true, the "pml" spinlock is still held and the
branch "out_ptl" is taken. If "ptl" is equal to "pml", the lock "pml"
will still be locked when the function returns.

It is better to add a new label "out_pte" and jump to it when
!start_pte is true in the first place; then no additional check for
"start_pte" or "ptl != pml" is needed, and we can simply unlock "pml"
and return.
Signed-off-by: I Hsin Cheng <richard120310@gmail.com>
---
mm/pt_reclaim.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/pt_reclaim.c b/mm/pt_reclaim.c
index 7e9455a18aae..163e38f1728d 100644
--- a/mm/pt_reclaim.c
+++ b/mm/pt_reclaim.c
@@ -43,7 +43,7 @@ void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
 	pml = pmd_lock(mm, pmd);
 	start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
 	if (!start_pte)
-		goto out_ptl;
+		goto out_pte;
 	if (ptl != pml)
 		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
 
@@ -68,4 +68,8 @@ void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr,
 	pte_unmap_unlock(start_pte, ptl);
 	if (ptl != pml)
 		spin_unlock(pml);
+	return;
+
+out_pte:
+	spin_unlock(pml);
 }
--
2.43.0
On 2025/2/9 02:49, I Hsin Cheng wrote:
> When !start_pte is true, the "pml" spinlock is still being holded and
> the branch "out_pte" is taken. If "ptl" is equal to "pml", the lock
> "pml" will still be locked when the function returns.

No. When start_pte is NULL, the ptl must also be NULL, so the ptl and
pml will not be equal.

[...]
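A minimal sketch of the pre-patch out_ptl cleanup path, reconstructed
from the hunk context above and the discussion; the out_ptl label and
the start_pte guard are not shown in the quoted diff, so those two
lines are assumptions:

out_ptl:
	if (start_pte)			/* NULL in this case: nothing to unmap */
		pte_unmap_unlock(start_pte, ptl);
	if (ptl != pml)			/* per the review, ptl is NULL when     */
		spin_unlock(pml);	/* start_pte is NULL, so this is taken  */
}					/* and pml is dropped; no lock is leaked */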
On Mon, Feb 10, 2025 at 12:05:05PM +0800, Qi Zheng wrote:
> On 2025/2/9 02:49, I Hsin Cheng wrote:
> > When !start_pte is true, the "pml" spinlock is still being holded and
> > the branch "out_pte" is taken. If "ptl" is equal to "pml", the lock
> > "pml" will still be locked when the function returns.
>
> No. When start_pte is NULL, the ptl must also be NULL, so the ptl and
> pml will not be equal.
>
> [...]

Hi Qi,

Thanks for your kind review!

> No. When start_pte is NULL, the ptl must also be NULL, so the ptl and
> pml will not be equal.

Since this is the case, we don't have to do any additional check for
"start_pte" and "ptl != pml"; we can be sure that only the second check
will be true, so we can unlock "pml" without the redundant check. That's
my understanding, what do you think?

Best regards,
I Hsin Cheng
On 2025/2/10 16:20, I Hsin Cheng wrote:
> On Mon, Feb 10, 2025 at 12:05:05PM +0800, Qi Zheng wrote:
>>
>> [...]
>>
>> No. When start_pte is NULL, the ptl must also be NULL, so the ptl and
>> pml will not be equal.
>
> Since this is the case, we don't have to do any addtional check for
> "start_pte" and "ptl != pml", we can be sure that only the second check
> will be true so we can unlock "pml" without the redundant check, that's
> my understanding, what do you think?

Adding a label vs. a redundant check in rare cases? Not sure if this
is worth it. ;)
On 2025/2/10 16:31, Qi Zheng wrote:
>
[...]
>>>>
>>>> diff --git a/mm/pt_reclaim.c b/mm/pt_reclaim.c
>>>> index 7e9455a18aae..163e38f1728d 100644
>>>> --- a/mm/pt_reclaim.c
>>>> +++ b/mm/pt_reclaim.c
>>>> @@ -43,7 +43,7 @@ void try_to_free_pte(struct mm_struct *mm, pmd_t
>>>> *pmd, unsigned long addr,
>>>> pml = pmd_lock(mm, pmd);
>>>> start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval,
>>>> &ptl);
>>>> if (!start_pte)
Maybe we can return directly here:
	if (!start_pte) {
		spin_unlock(pml);
		return;
	}
>>>> - goto out_ptl;
>>>> + goto out_pte;
>>>> if (ptl != pml)
>>>> spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
>>>> @@ -68,4 +68,8 @@ void try_to_free_pte(struct mm_struct *mm, pmd_t
>>>> *pmd, unsigned long addr,
>>>> pte_unmap_unlock(start_pte, ptl);
>>>> if (ptl != pml)
>>>> spin_unlock(pml);
>>>> + return;
>>>> +
>>>> +out_pte:
>>>> + spin_unlock(pml);
>>>> }
>>
On Mon, Feb 10, 2025 at 04:42:13PM +0800, Qi Zheng wrote:
>
>
> On 2025/2/10 16:31, Qi Zheng wrote:
> >
>
> [...]
>
> > > > >
> > > > > diff --git a/mm/pt_reclaim.c b/mm/pt_reclaim.c
> > > > > index 7e9455a18aae..163e38f1728d 100644
> > > > > --- a/mm/pt_reclaim.c
> > > > > +++ b/mm/pt_reclaim.c
> > > > > @@ -43,7 +43,7 @@ void try_to_free_pte(struct mm_struct *mm,
> > > > > pmd_t *pmd, unsigned long addr,
> > > > > pml = pmd_lock(mm, pmd);
> > > > > start_pte = pte_offset_map_rw_nolock(mm, pmd, addr,
> > > > > &pmdval, &ptl);
> > > > > if (!start_pte)
>
> Maybe we can return directly here:
>
> 	if (!start_pte) {
> 		spin_unlock(pml);
> 		return;
> 	}
>
> > > > > - goto out_ptl;
> > > > > + goto out_pte;
> > > > > if (ptl != pml)
> > > > > spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
> > > > > @@ -68,4 +68,8 @@ void try_to_free_pte(struct mm_struct *mm,
> > > > > pmd_t *pmd, unsigned long addr,
> > > > > pte_unmap_unlock(start_pte, ptl);
> > > > > if (ptl != pml)
> > > > > spin_unlock(pml);
> > > > > + return;
> > > > > +
> > > > > +out_pte:
> > > > > + spin_unlock(pml);
> > > > > }
> > >
I've sent a new patch for this change and changed the title, since
"pml" is guaranteed to get unlocked either way; we just avoid the
redundant branches.
Best regards,
I Hsin Cheng
On Mon, Feb 10, 2025 at 04:42:13PM +0800, Qi Zheng wrote:
>
> [...]
>
Hi Qi,

> Maybe we can return directly here:
>
> 	if (!start_pte) {
> 		spin_unlock(pml);
> 		return;
> 	}

Ah, that's right. I think it's better than adding another label; it
doesn't even need to jump.

Should I send patch v2 for it, or wait for the maintainer's review
first?

Best regards,
I Hsin Cheng
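As a rough sketch, folding Qi Zheng's suggestion into the hunk quoted
earlier gives the shape the v2 change would presumably take; the
surrounding lines come straight from the quoted context, while the
inline comment and the exact form of v2 are assumptions, since the v2
patch itself is not part of this thread:

	pml = pmd_lock(mm, pmd);
	start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
	if (!start_pte) {
		/* Only pml is held at this point; drop it and bail out. */
		spin_unlock(pml);
		return;
	}
	if (ptl != pml)
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);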