Mark __split_huge_pmd(), split_huge_pmd() and split_huge_pmd_address()
with __must_check so the compiler warns if any caller ignores the return
value. Ignoring the return value and proceeding as if the pmd had been
split could result in a kernel bug. The possibility of an order-0
allocation failing for a page table allocation is very low, but it
should still be handled correctly.
Signed-off-by: Usama Arif <usama.arif@linux.dev>
---
include/linux/huge_mm.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 224965fce4e66..c4d0badc4ce27 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -418,7 +418,7 @@ static inline int split_huge_page(struct page *page)
extern struct list_lru deferred_split_lru;
void deferred_split_folio(struct folio *folio, bool partially_mapped);
-int __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+int __must_check __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze);
/**
@@ -447,7 +447,7 @@ static inline bool pmd_is_huge(pmd_t pmd)
return false;
}
-static inline int split_huge_pmd(struct vm_area_struct *vma,
+static inline int __must_check split_huge_pmd(struct vm_area_struct *vma,
pmd_t *pmd, unsigned long address)
{
if (pmd_is_huge(*pmd))
@@ -455,7 +455,7 @@ static inline int split_huge_pmd(struct vm_area_struct *vma,
return 0;
}
-int split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
+int __must_check split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
--
2.52.0