mm/hugetlb.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-)
Signed-off-by: Biancaa Ramesh <biancaa2210329@ssn.edu.in>
---
mm/hugetlb.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 795ee393eac0..1760fa39a3af 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2934,7 +2934,7 @@ typedef enum {
* NOTE: This is mostly identical to MAP_CHG_NEEDED, except
* that currently vma_needs_reservation() has an unwanted side
* effect to either use end() or commit() to complete the
- * transaction. Hence it needs to differenciate from NEEDED.
+ * transaction. Hence it needs to differentiate from NEEDED.
*/
MAP_CHG_ENFORCED = 2,
} map_chg_state;
@@ -5983,7 +5983,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
int rc = vma_needs_reservation(h, vma, address);
if (rc < 0)
- /* Pressumably allocate_file_region_entries failed
+ /* Presumably allocate_file_region_entries failed
* to allocate a file_region struct. Clear
* hugetlb_restore_reserve so that global reserve
* count will not be incremented by free_huge_folio.
@@ -6007,7 +6007,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
/*
* If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
* could defer the flush until now, since by holding i_mmap_rwsem we
- * guaranteed that the last refernece would not be dropped. But we must
+ * guaranteed that the last reference would not be dropped. But we must
* do the flushing before we return, as otherwise i_mmap_rwsem will be
* dropped and the last reference to the shared PMDs page might be
* dropped as well.
@@ -6211,7 +6211,7 @@ static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
* In order to determine where this is a COW on a MAP_PRIVATE mapping it
* is enough to check whether the old_folio is anonymous. This means that
* the reserve for this address was consumed. If reserves were used, a
- * partial faulted mapping at the fime of fork() could consume its reserves
+ * partial faulted mapping at the time of fork() could consume its reserves
* on COW instead of the full address range.
*/
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
@@ -7193,7 +7193,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
} else if (unlikely(is_pte_marker(pte))) {
/*
* Do nothing on a poison marker; page is
- * corrupted, permissons do not apply. Here
+ * corrupted, permissions do not apply. Here
* pte_marker_uffd_wp()==true implies !poison
* because they're mutual exclusive.
*/
--
2.43.0
--
::DISCLAIMER::
---------------------------------------------------------------------
The contents of this e-mail and any attachment(s) are confidential and
intended for the named recipient(s) only. Views or opinions, if any,
presented in this email are solely those of the author and may not
necessarily reflect the views or opinions of SSN Institutions (SSN) or its
affiliates. Any form of reproduction, dissemination, copying, disclosure,
modification, distribution and / or publication of this message without the
prior written consent of an authorized representative of SSN is strictly
prohibited. If you have received this email in error, please delete it and
notify the sender immediately.
---------------------------------------------------------------------
The header of this mail should have a valid DKIM signature for the domain
ssn.edu.in <http://www.ssn.edu.in/>
© 2016 - 2025 Red Hat, Inc.