mm/hugetlb.c | 6 +++--- mm/hugetlb_vmemmap.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-)
There are some typos in the code comments as follows:
differenciate ==> differentiate
refernece ==> reference
permissons ==> permissions
indepdenent ==> independent
Spliting ==> Splitting
Just fix it.
Signed-off-by: jianyun.gao <jianyungao89@gmail.com>
---
mm/hugetlb.c | 6 +++---
mm/hugetlb_vmemmap.c | 6 +++---
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eed59cfb5d21..8ff9edd09504 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2954,7 +2954,7 @@ typedef enum {
* NOTE: This is mostly identical to MAP_CHG_NEEDED, except
* that currently vma_needs_reservation() has an unwanted side
* effect to either use end() or commit() to complete the
- * transaction. Hence it needs to differenciate from NEEDED.
+ * transaction. Hence it needs to differentiate from NEEDED.
*/
MAP_CHG_ENFORCED = 2,
} map_chg_state;
@@ -5998,7 +5998,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
/*
* If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
* could defer the flush until now, since by holding i_mmap_rwsem we
- * guaranteed that the last refernece would not be dropped. But we must
+ * guaranteed that the last reference would not be dropped. But we must
* do the flushing before we return, as otherwise i_mmap_rwsem will be
* dropped and the last reference to the shared PMDs page might be
* dropped as well.
@@ -7179,7 +7179,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
} else if (unlikely(is_pte_marker(pte))) {
/*
* Do nothing on a poison marker; page is
- * corrupted, permissons do not apply. Here
+ * corrupted, permissions do not apply. Here
* pte_marker_uffd_wp()==true implies !poison
* because they're mutual exclusive.
*/
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index ba0fb1b6a5a8..e6f79b2c63ee 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start,
if (likely(pmd_leaf(*pmd))) {
/*
* Higher order allocations from buddy allocator must be able to
- * be treated as indepdenent small pages (as they can be freed
+ * be treated as independent small pages (as they can be freed
* individually).
*/
if (!PageReserved(head))
@@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
ret = hugetlb_vmemmap_split_folio(h, folio);
/*
- * Spliting the PMD requires allocating a page, thus lets fail
+ * Splitting the PMD requires allocating a page, thus lets fail
* early once we encounter the first OOM. No point in retrying
* as it can be dynamically done on remap with the memory
* we get back from the vmemmap deduplication.
@@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
/*
* Pages to be freed may have been accumulated. If we
* encounter an ENOMEM, free what we have and try again.
- * This can occur in the case that both spliting fails
+ * This can occur in the case that both splitting fails
* halfway and head page allocation also failed. In this
* case __hugetlb_vmemmap_optimize_folio() would free memory
* allowing more vmemmap remaps to occur.
--
2.34.1
Superseded by: 20250929002608.1633825-1-jianyungao89@gmail.com . Please ignore / Please drop. Thanks, Jianyun On Sat, Sep 27, 2025 at 2:49 PM jianyun.gao <jianyungao89@gmail.com> wrote: > > There are som typos in the code comments as follows: > > differenciate ==> differentiate > refernece ==> reference > permissons ==> permissions > indepdenent ==> independent > Spliting ==> Splitting > > Just fix it. > > Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > --- > mm/hugetlb.c | 6 +++--- > mm/hugetlb_vmemmap.c | 6 +++--- > 2 files changed, 6 insertions(+), 6 deletions(-) > > diff --git a/mm/hugetlb.c b/mm/hugetlb.c > index eed59cfb5d21..8ff9edd09504 100644 > --- a/mm/hugetlb.c > +++ b/mm/hugetlb.c > @@ -2954,7 +2954,7 @@ typedef enum { > * NOTE: This is mostly identical to MAP_CHG_NEEDED, except > * that currently vma_needs_reservation() has an unwanted side > * effect to either use end() or commit() to complete the > - * transaction. Hence it needs to differenciate from NEEDED. > + * transaction. Hence it needs to differentiate from NEEDED. > */ > MAP_CHG_ENFORCED = 2, > } map_chg_state; > @@ -5998,7 +5998,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, > /* > * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We > * could defer the flush until now, since by holding i_mmap_rwsem we > - * guaranteed that the last refernece would not be dropped. But we must > + * guaranteed that the last reference would not be dropped. But we must > * do the flushing before we return, as otherwise i_mmap_rwsem will be > * dropped and the last reference to the shared PMDs page might be > * dropped as well. > @@ -7179,7 +7179,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma, > } else if (unlikely(is_pte_marker(pte))) { > /* > * Do nothing on a poison marker; page is > - * corrupted, permissons do not apply. Here > + * corrupted, permissions do not apply. 
Here > * pte_marker_uffd_wp()==true implies !poison > * because they're mutual exclusive. > */ > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c > index ba0fb1b6a5a8..e6f79b2c63ee 100644 > --- a/mm/hugetlb_vmemmap.c > +++ b/mm/hugetlb_vmemmap.c > @@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start, > if (likely(pmd_leaf(*pmd))) { > /* > * Higher order allocations from buddy allocator must be able to > - * be treated as indepdenent small pages (as they can be freed > + * be treated as independent small pages (as they can be freed > * individually). > */ > if (!PageReserved(head)) > @@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, > ret = hugetlb_vmemmap_split_folio(h, folio); > > /* > - * Spliting the PMD requires allocating a page, thus lets fail > + * Splitting the PMD requires allocating a page, thus lets fail > * early once we encounter the first OOM. No point in retrying > * as it can be dynamically done on remap with the memory > * we get back from the vmemmap deduplication. > @@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, > /* > * Pages to be freed may have been accumulated. If we > * encounter an ENOMEM, free what we have and try again. > - * This can occur in the case that both spliting fails > + * This can occur in the case that both splitting fails > * halfway and head page allocation also failed. In this > * case __hugetlb_vmemmap_optimize_folio() would free memory > * allowing more vmemmap remaps to occur. > -- > 2.34.1 >
On 27/09/25 12:19 pm, jianyun.gao wrote: > There are som typos in the code comments as follows: > > differenciate ==> differentiate > refernece ==> reference > permissons ==> permissions > indepdenent ==> independent > Spliting ==> Splitting > > Just fix it. > > Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > --- > mm/hugetlb.c | 6 +++--- > mm/hugetlb_vmemmap.c | 6 +++--- > 2 files changed, 6 insertions(+), 6 deletions(-) > > diff --git a/mm/hugetlb.c b/mm/hugetlb.c > index eed59cfb5d21..8ff9edd09504 100644 > --- a/mm/hugetlb.c > +++ b/mm/hugetlb.c > @@ -2954,7 +2954,7 @@ typedef enum { > * NOTE: This is mostly identical to MAP_CHG_NEEDED, except > * that currently vma_needs_reservation() has an unwanted side > * effect to either use end() or commit() to complete the > - * transaction. Hence it needs to differenciate from NEEDED. > + * transaction. Hence it needs to differentiate from NEEDED. > */ > MAP_CHG_ENFORCED = 2, > } map_chg_state; > @@ -5998,7 +5998,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, > /* > * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We > * could defer the flush until now, since by holding i_mmap_rwsem we > - * guaranteed that the last refernece would not be dropped. But we must > + * guaranteed that the last reference would not be dropped. But we must > * do the flushing before we return, as otherwise i_mmap_rwsem will be > * dropped and the last reference to the shared PMDs page might be > * dropped as well. > @@ -7179,7 +7179,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma, > } else if (unlikely(is_pte_marker(pte))) { > /* > * Do nothing on a poison marker; page is > - * corrupted, permissons do not apply. Here > + * corrupted, permissions do not apply. Here Can also fix the extra space between "apply" and "Here". > * pte_marker_uffd_wp()==true implies !poison > * because they're mutual exclusive. 
> */ > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c > index ba0fb1b6a5a8..e6f79b2c63ee 100644 > --- a/mm/hugetlb_vmemmap.c > +++ b/mm/hugetlb_vmemmap.c > @@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start, > if (likely(pmd_leaf(*pmd))) { > /* > * Higher order allocations from buddy allocator must be able to > - * be treated as indepdenent small pages (as they can be freed > + * be treated as independent small pages (as they can be freed > * individually). > */ > if (!PageReserved(head)) > @@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, > ret = hugetlb_vmemmap_split_folio(h, folio); > > /* > - * Spliting the PMD requires allocating a page, thus lets fail > + * Splitting the PMD requires allocating a page, thus lets fail lets -> let's or let us > * early once we encounter the first OOM. No point in retrying > * as it can be dynamically done on remap with the memory > * we get back from the vmemmap deduplication. > @@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, > /* > * Pages to be freed may have been accumulated. If we > * encounter an ENOMEM, free what we have and try again. > - * This can occur in the case that both spliting fails > + * This can occur in the case that both splitting fails > * halfway and head page allocation also failed. In this > * case __hugetlb_vmemmap_optimize_folio() would free memory > * allowing more vmemmap remaps to occur. As Wei says, this patch can be merged with the earlier, thanks.
On Sun, Sep 28, 2025 at 8:56 PM Dev Jain <dev.jain@arm.com> wrote: > > > On 27/09/25 12:19 pm, jianyun.gao wrote: > > There are som typos in the code comments as follows: > > > > differenciate ==> differentiate > > refernece ==> reference > > permissons ==> permissions > > indepdenent ==> independent > > Spliting ==> Splitting > > > > Just fix it. > > > > Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > > --- > > mm/hugetlb.c | 6 +++--- > > mm/hugetlb_vmemmap.c | 6 +++--- > > 2 files changed, 6 insertions(+), 6 deletions(-) > > > > diff --git a/mm/hugetlb.c b/mm/hugetlb.c > > index eed59cfb5d21..8ff9edd09504 100644 > > --- a/mm/hugetlb.c > > +++ b/mm/hugetlb.c > > @@ -2954,7 +2954,7 @@ typedef enum { > > * NOTE: This is mostly identical to MAP_CHG_NEEDED, except > > * that currently vma_needs_reservation() has an unwanted side > > * effect to either use end() or commit() to complete the > > - * transaction. Hence it needs to differenciate from NEEDED. > > + * transaction. Hence it needs to differentiate from NEEDED. > > */ > > MAP_CHG_ENFORCED = 2, > > } map_chg_state; > > @@ -5998,7 +5998,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, > > /* > > * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We > > * could defer the flush until now, since by holding i_mmap_rwsem we > > - * guaranteed that the last refernece would not be dropped. But we must > > + * guaranteed that the last reference would not be dropped. But we must > > * do the flushing before we return, as otherwise i_mmap_rwsem will be > > * dropped and the last reference to the shared PMDs page might be > > * dropped as well. > > @@ -7179,7 +7179,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma, > > } else if (unlikely(is_pte_marker(pte))) { > > /* > > * Do nothing on a poison marker; page is > > - * corrupted, permissons do not apply. Here > > + * corrupted, permissions do not apply. 
Here > > Can also fix the extra space between "apply" and "Here". Sure, I will fix it in the next patch. > > > * pte_marker_uffd_wp()==true implies !poison > > * because they're mutual exclusive. > > */ > > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c > > index ba0fb1b6a5a8..e6f79b2c63ee 100644 > > --- a/mm/hugetlb_vmemmap.c > > +++ b/mm/hugetlb_vmemmap.c > > @@ -75,7 +75,7 @@ static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start, > > if (likely(pmd_leaf(*pmd))) { > > /* > > * Higher order allocations from buddy allocator must be able to > > - * be treated as indepdenent small pages (as they can be freed > > + * be treated as independent small pages (as they can be freed > > * individually). > > */ > > if (!PageReserved(head)) > > @@ -684,7 +684,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, > > ret = hugetlb_vmemmap_split_folio(h, folio); > > > > /* > > - * Spliting the PMD requires allocating a page, thus lets fail > > + * Splitting the PMD requires allocating a page, thus lets fail > > lets -> let's or let us Okay, I will also fix it in the next patch. > > > * early once we encounter the first OOM. No point in retrying > > * as it can be dynamically done on remap with the memory > > * we get back from the vmemmap deduplication. > > @@ -715,7 +715,7 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, > > /* > > * Pages to be freed may have been accumulated. If we > > * encounter an ENOMEM, free what we have and try again. > > - * This can occur in the case that both spliting fails > > + * This can occur in the case that both splitting fails > > * halfway and head page allocation also failed. In this > > * case __hugetlb_vmemmap_optimize_folio() would free memory > > * allowing more vmemmap remaps to occur. > > As Wei says, this patch can be merged with the earlier, thanks. > Get it, I will merge this patch to the next one. Thank you very much for your review!
On Sat, Sep 27, 2025 at 02:49:26PM +0800, jianyun.gao wrote: >There are som typos in the code comments as follows: > > differenciate ==> differentiate > refernece ==> reference > permissons ==> permissions > indepdenent ==> independent > Spliting ==> Splitting > >Just fix it. > >Signed-off-by: jianyun.gao <jianyungao89@gmail.com> The change looks good. But maybe we can merge it with the next one you sent? They both belong to mm. -- Wei Yang Help you, Help me
Hello Wei, Thank you for your reviews. On Sun, Sep 28, 2025 at 9:32 AM Wei Yang <richard.weiyang@gmail.com> wrote: > > On Sat, Sep 27, 2025 at 02:49:26PM +0800, jianyun.gao wrote: > >There are som typos in the code comments as follows: > > > > differenciate ==> differentiate > > refernece ==> reference > > permissons ==> permissions > > indepdenent ==> independent > > Spliting ==> Splitting > > > >Just fix it. > > > >Signed-off-by: jianyun.gao <jianyungao89@gmail.com> > > The change looks good. > > But maybe we can merge it with the next one you sent? They both belongs to mm. It's okay. Thank you! > > -- > Wei Yang > Help you, Help me
© 2016 - 2025 Red Hat, Inc.