[PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness

Posted by Max Kellermann 3 months, 1 week ago
We select certain test functions which invoke only each other,
functions that are already const-ified, or no further functions.

It is therefore relatively trivial to const-ify them, which
provides a basis for more const-ification further up the call
stack.

Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 include/linux/pagemap.h | 57 +++++++++++++++++++++--------------------
 1 file changed, 29 insertions(+), 28 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a3e16d74792f..1d35f9e1416e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
 	return filemap_write_and_wait(inode->i_mapping);
 }
 
-static inline bool mapping_empty(struct address_space *mapping)
+static inline bool mapping_empty(const struct address_space *const mapping)
 {
 	return xa_empty(&mapping->i_pages);
 }
@@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
  * refcount and the referenced bit, which will be elevated or set in
  * the process of adding new cache pages to an inode.
  */
-static inline bool mapping_shrinkable(struct address_space *mapping)
+static inline bool mapping_shrinkable(const struct address_space *const mapping)
 {
 	void *head;
 
@@ -267,7 +267,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
 	clear_bit(AS_UNEVICTABLE, &mapping->flags);
 }
 
-static inline bool mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(const struct address_space *const mapping)
 {
 	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
 }
@@ -277,7 +277,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
 	set_bit(AS_EXITING, &mapping->flags);
 }
 
-static inline int mapping_exiting(struct address_space *mapping)
+static inline int mapping_exiting(const struct address_space *const mapping)
 {
 	return test_bit(AS_EXITING, &mapping->flags);
 }
@@ -287,7 +287,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
 	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
 }
 
-static inline int mapping_use_writeback_tags(struct address_space *mapping)
+static inline int mapping_use_writeback_tags(const struct address_space *const mapping)
 {
 	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
 }
@@ -333,7 +333,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
 	set_bit(AS_INACCESSIBLE, &mapping->flags);
 }
 
-static inline bool mapping_inaccessible(struct address_space *mapping)
+static inline bool mapping_inaccessible(const struct address_space *const mapping)
 {
 	return test_bit(AS_INACCESSIBLE, &mapping->flags);
 }
@@ -343,18 +343,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
 	set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
 }
 
-static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
+static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *const mapping)
 {
 	return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
 }
 
-static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(const struct address_space *const mapping)
 {
 	return mapping->gfp_mask;
 }
 
 /* Restricts the given gfp_mask to what the mapping allows. */
-static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
 		gfp_t gfp_mask)
 {
 	return mapping_gfp_mask(mapping) & gfp_mask;
@@ -477,13 +477,13 @@ mapping_min_folio_order(const struct address_space *mapping)
 }
 
 static inline unsigned long
-mapping_min_folio_nrpages(struct address_space *mapping)
+mapping_min_folio_nrpages(const struct address_space *const mapping)
 {
 	return 1UL << mapping_min_folio_order(mapping);
 }
 
 static inline unsigned long
-mapping_min_folio_nrbytes(struct address_space *mapping)
+mapping_min_folio_nrbytes(const struct address_space *const mapping)
 {
 	return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
 }
@@ -497,7 +497,7 @@ mapping_min_folio_nrbytes(struct address_space *mapping)
  * new folio to the page cache and need to know what index to give it,
  * call this function.
  */
-static inline pgoff_t mapping_align_index(struct address_space *mapping,
+static inline pgoff_t mapping_align_index(const struct address_space *const mapping,
 					  pgoff_t index)
 {
 	return round_down(index, mapping_min_folio_nrpages(mapping));
@@ -507,7 +507,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
  * Large folio support currently depends on THP.  These dependencies are
  * being worked on but are not yet fixed.
  */
-static inline bool mapping_large_folio_support(struct address_space *mapping)
+static inline bool mapping_large_folio_support(const struct address_space *mapping)
 {
 	/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
 	VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
@@ -522,7 +522,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
 	return PAGE_SIZE << mapping_max_folio_order(mapping);
 }
 
-static inline int filemap_nr_thps(struct address_space *mapping)
+static inline int filemap_nr_thps(const struct address_space *const mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
 	return atomic_read(&mapping->nr_thps);
@@ -936,7 +936,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
  *
  * Return: The index of the folio which follows this folio in the file.
  */
-static inline pgoff_t folio_next_index(struct folio *folio)
+static inline pgoff_t folio_next_index(const struct folio *const folio)
 {
 	return folio->index + folio_nr_pages(folio);
 }
@@ -965,7 +965,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
  * e.g., shmem did not move this folio to the swap cache.
  * Return: true or false.
  */
-static inline bool folio_contains(struct folio *folio, pgoff_t index)
+static inline bool folio_contains(const struct folio *const folio, pgoff_t index)
 {
 	VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
 	return index - folio->index < folio_nr_pages(folio);
@@ -1042,13 +1042,13 @@ static inline loff_t page_offset(struct page *page)
 /*
  * Get the offset in PAGE_SIZE (even for hugetlb folios).
  */
-static inline pgoff_t folio_pgoff(struct folio *folio)
+static inline pgoff_t folio_pgoff(const struct folio *const folio)
 {
 	return folio->index;
 }
 
-static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
-					unsigned long address)
+static inline pgoff_t linear_page_index(const struct vm_area_struct *const vma,
+					const unsigned long address)
 {
 	pgoff_t pgoff;
 	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
@@ -1468,7 +1468,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
  * readahead_pos - The byte offset into the file of this readahead request.
  * @rac: The readahead request.
  */
-static inline loff_t readahead_pos(struct readahead_control *rac)
+static inline loff_t readahead_pos(const struct readahead_control *const rac)
 {
 	return (loff_t)rac->_index * PAGE_SIZE;
 }
@@ -1477,7 +1477,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
  * readahead_length - The number of bytes in this readahead request.
  * @rac: The readahead request.
  */
-static inline size_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(const struct readahead_control *const rac)
 {
 	return rac->_nr_pages * PAGE_SIZE;
 }
@@ -1486,7 +1486,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
  * readahead_index - The index of the first page in this readahead request.
  * @rac: The readahead request.
  */
-static inline pgoff_t readahead_index(struct readahead_control *rac)
+static inline pgoff_t readahead_index(const struct readahead_control *const rac)
 {
 	return rac->_index;
 }
@@ -1495,7 +1495,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
  * readahead_count - The number of pages in this readahead request.
  * @rac: The readahead request.
  */
-static inline unsigned int readahead_count(struct readahead_control *rac)
+static inline unsigned int readahead_count(const struct readahead_control *const rac)
 {
 	return rac->_nr_pages;
 }
@@ -1504,12 +1504,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
  * readahead_batch_length - The number of bytes in the current batch.
  * @rac: The readahead request.
  */
-static inline size_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(const struct readahead_control *const rac)
 {
 	return rac->_batch_count * PAGE_SIZE;
 }
 
-static inline unsigned long dir_pages(struct inode *inode)
+static inline unsigned long dir_pages(const struct inode *const inode)
 {
 	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
 			       PAGE_SHIFT;
@@ -1523,8 +1523,8 @@ static inline unsigned long dir_pages(struct inode *inode)
  * Return: the number of bytes in the folio up to EOF,
  * or -EFAULT if the folio was truncated.
  */
-static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
-					      struct inode *inode)
+static inline ssize_t folio_mkwrite_check_truncate(const struct folio *const folio,
+						   const struct inode *const inode)
 {
 	loff_t size = i_size_read(inode);
 	pgoff_t index = size >> PAGE_SHIFT;
@@ -1555,7 +1555,8 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
  * Return: The number of filesystem blocks covered by this folio.
  */
 static inline
-unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+unsigned int i_blocks_per_folio(const struct inode *const inode,
+				const struct folio *const folio)
 {
 	return folio_size(folio) >> inode->i_blkbits;
 }
-- 
2.47.2
Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by Lorenzo Stoakes 3 months, 1 week ago
On Mon, Sep 01, 2025 at 02:30:18PM +0200, Max Kellermann wrote:
> We select certain test functions which either invoke each other,
> functions that are already const-ified, or no further functions.
>
> It is therefore relatively trivial to const-ify them, which
> provides a basis for further const-ification further up the call
> stack.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> ---
>  include/linux/pagemap.h | 57 +++++++++++++++++++++--------------------
>  1 file changed, 29 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index a3e16d74792f..1d35f9e1416e 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
>  	return filemap_write_and_wait(inode->i_mapping);
>  }
>
> -static inline bool mapping_empty(struct address_space *mapping)
> +static inline bool mapping_empty(const struct address_space *const mapping)

Generally - I'm not sure how useful this 'double' const-ification is.

const struct <type> *const <val>
  ^                    ^
  |                    |
  |                    |
  1                    2

Means:

1. (most useful) Const pointer (const <type> *<param>) means that the dereffed
   value is const, so *<param> = <val> or <param>-><field> = <val> are prohibited.

2. (less useful) We can't modify the actual pointer value either, so
   e.g. <param> = <new param> is prohibited.
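
For example, with one of the helpers touched by this patch (a sketch only;
the commented-out assignments are what the compiler would now reject):

	static inline bool mapping_empty(const struct address_space *const mapping)
	{
		/* (1) rejected: the pointed-to struct is const */
		/* mapping->gfp_mask = GFP_KERNEL; */

		/* (2) rejected: the pointer variable itself is const */
		/* mapping = NULL; */

		return xa_empty(&mapping->i_pages);
	}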

I mean it's kinda nice to guarantee that this won't happen but I'm not sure if
we're getting much value for the noise?

We also never mention that we're doing this in any commit message or the cover
letter.

>  {
>  	return xa_empty(&mapping->i_pages);
>  }
> @@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
>   * refcount and the referenced bit, which will be elevated or set in
>   * the process of adding new cache pages to an inode.
>   */
> -static inline bool mapping_shrinkable(struct address_space *mapping)
> +static inline bool mapping_shrinkable(const struct address_space *const mapping)
>  {
>  	void *head;
>
> @@ -267,7 +267,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
>  	clear_bit(AS_UNEVICTABLE, &mapping->flags);
>  }
>
> -static inline bool mapping_unevictable(struct address_space *mapping)
> +static inline bool mapping_unevictable(const struct address_space *const mapping)
>  {
>  	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
>  }
> @@ -277,7 +277,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
>  	set_bit(AS_EXITING, &mapping->flags);
>  }
>
> -static inline int mapping_exiting(struct address_space *mapping)
> +static inline int mapping_exiting(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_EXITING, &mapping->flags);
>  }
> @@ -287,7 +287,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
>  	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
>  }
>
> -static inline int mapping_use_writeback_tags(struct address_space *mapping)
> +static inline int mapping_use_writeback_tags(const struct address_space *const mapping)
>  {
>  	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
>  }
> @@ -333,7 +333,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
>  	set_bit(AS_INACCESSIBLE, &mapping->flags);
>  }
>
> -static inline bool mapping_inaccessible(struct address_space *mapping)
> +static inline bool mapping_inaccessible(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_INACCESSIBLE, &mapping->flags);
>  }
> @@ -343,18 +343,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
>  	set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
>  }
>
> -static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
> +static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
>  }
>
> -static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
> +static inline gfp_t mapping_gfp_mask(const struct address_space *const mapping)
>  {
>  	return mapping->gfp_mask;
>  }
>
>  /* Restricts the given gfp_mask to what the mapping allows. */
> -static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
> +static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
>  		gfp_t gfp_mask)
>  {
>  	return mapping_gfp_mask(mapping) & gfp_mask;
> @@ -477,13 +477,13 @@ mapping_min_folio_order(const struct address_space *mapping)
>  }
>
>  static inline unsigned long
> -mapping_min_folio_nrpages(struct address_space *mapping)
> +mapping_min_folio_nrpages(const struct address_space *const mapping)
>  {
>  	return 1UL << mapping_min_folio_order(mapping);
>  }
>
>  static inline unsigned long
> -mapping_min_folio_nrbytes(struct address_space *mapping)
> +mapping_min_folio_nrbytes(const struct address_space *const mapping)
>  {
>  	return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
>  }
> @@ -497,7 +497,7 @@ mapping_min_folio_nrbytes(struct address_space *mapping)
>   * new folio to the page cache and need to know what index to give it,
>   * call this function.
>   */
> -static inline pgoff_t mapping_align_index(struct address_space *mapping,
> +static inline pgoff_t mapping_align_index(const struct address_space *const mapping,
>  					  pgoff_t index)
>  {
>  	return round_down(index, mapping_min_folio_nrpages(mapping));
> @@ -507,7 +507,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
>   * Large folio support currently depends on THP.  These dependencies are
>   * being worked on but are not yet fixed.
>   */
> -static inline bool mapping_large_folio_support(struct address_space *mapping)
> +static inline bool mapping_large_folio_support(const struct address_space *mapping)
>  {
>  	/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
>  	VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
> @@ -522,7 +522,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
>  	return PAGE_SIZE << mapping_max_folio_order(mapping);
>  }
>
> -static inline int filemap_nr_thps(struct address_space *mapping)
> +static inline int filemap_nr_thps(const struct address_space *const mapping)
>  {
>  #ifdef CONFIG_READ_ONLY_THP_FOR_FS
>  	return atomic_read(&mapping->nr_thps);
> @@ -936,7 +936,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
>   *
>   * Return: The index of the folio which follows this folio in the file.
>   */
> -static inline pgoff_t folio_next_index(struct folio *folio)
> +static inline pgoff_t folio_next_index(const struct folio *const folio)
>  {
>  	return folio->index + folio_nr_pages(folio);
>  }
> @@ -965,7 +965,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
>   * e.g., shmem did not move this folio to the swap cache.
>   * Return: true or false.
>   */
> -static inline bool folio_contains(struct folio *folio, pgoff_t index)
> +static inline bool folio_contains(const struct folio *const folio, pgoff_t index)
>  {
>  	VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
>  	return index - folio->index < folio_nr_pages(folio);
> @@ -1042,13 +1042,13 @@ static inline loff_t page_offset(struct page *page)
>  /*
>   * Get the offset in PAGE_SIZE (even for hugetlb folios).
>   */
> -static inline pgoff_t folio_pgoff(struct folio *folio)
> +static inline pgoff_t folio_pgoff(const struct folio *const folio)
>  {
>  	return folio->index;
>  }
>
> -static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
> -					unsigned long address)
> +static inline pgoff_t linear_page_index(const struct vm_area_struct *const vma,
> +					const unsigned long address)
>  {
>  	pgoff_t pgoff;
>  	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
> @@ -1468,7 +1468,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
>   * readahead_pos - The byte offset into the file of this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline loff_t readahead_pos(struct readahead_control *rac)
> +static inline loff_t readahead_pos(const struct readahead_control *const rac)
>  {
>  	return (loff_t)rac->_index * PAGE_SIZE;
>  }
> @@ -1477,7 +1477,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
>   * readahead_length - The number of bytes in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline size_t readahead_length(struct readahead_control *rac)
> +static inline size_t readahead_length(const struct readahead_control *const rac)
>  {
>  	return rac->_nr_pages * PAGE_SIZE;
>  }
> @@ -1486,7 +1486,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
>   * readahead_index - The index of the first page in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline pgoff_t readahead_index(struct readahead_control *rac)
> +static inline pgoff_t readahead_index(const struct readahead_control *const rac)
>  {
>  	return rac->_index;
>  }
> @@ -1495,7 +1495,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
>   * readahead_count - The number of pages in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline unsigned int readahead_count(struct readahead_control *rac)
> +static inline unsigned int readahead_count(const struct readahead_control *const rac)
>  {
>  	return rac->_nr_pages;
>  }
> @@ -1504,12 +1504,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
>   * readahead_batch_length - The number of bytes in the current batch.
>   * @rac: The readahead request.
>   */
> -static inline size_t readahead_batch_length(struct readahead_control *rac)
> +static inline size_t readahead_batch_length(const struct readahead_control *const rac)
>  {
>  	return rac->_batch_count * PAGE_SIZE;
>  }
>
> -static inline unsigned long dir_pages(struct inode *inode)
> +static inline unsigned long dir_pages(const struct inode *const inode)
>  {
>  	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
>  			       PAGE_SHIFT;
> @@ -1523,8 +1523,8 @@ static inline unsigned long dir_pages(struct inode *inode)
>   * Return: the number of bytes in the folio up to EOF,
>   * or -EFAULT if the folio was truncated.
>   */
> -static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
> -					      struct inode *inode)
> +static inline ssize_t folio_mkwrite_check_truncate(const struct folio *const folio,
> +						   const struct inode *const inode)
>  {
>  	loff_t size = i_size_read(inode);
>  	pgoff_t index = size >> PAGE_SHIFT;
> @@ -1555,7 +1555,8 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
>   * Return: The number of filesystem blocks covered by this folio.
>   */
>  static inline
> -unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
> +unsigned int i_blocks_per_folio(const struct inode *const inode,
> +				const struct folio *const folio)
>  {
>  	return folio_size(folio) >> inode->i_blkbits;
>  }
> --
> 2.47.2
>
Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by Max Kellermann 3 months, 1 week ago
On Mon, Sep 1, 2025 at 4:25 PM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
> 1. (most useful) Const pointer (const <type> *<param>) means that the dereffed
>    value is const, so *<param> = <val> or <param>-><field> = <val> are prohibited.

Only this was what my initial patch was about.

> 2. (less useful) We can't modify the actual pointer value either, so
>    e.g. <param> = <new param> is prohibited.

This wasn't my idea; it was Andrew Morton's idea, supported by Yuanchu Xie:
 https://lore.kernel.org/lkml/CAJj2-QHVC0QW_4X95LLAnM=1g6apH==-OXZu65SVeBj0tSUcBg@mail.gmail.com/
You know that because you participated in the discussion. In that
thread, nobody objected, so I took the time and adjusted all of my
patches.
There is some value, but of course it's very small.

Note that I added the value-level "const" only to the implementation,
never to prototypes, because it would have no effect there.
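E.g. (a hypothetical helper name, just to illustrate the declaration vs.
definition split; not from this series):

	/* declaration in a header - a value-level const would have no
	 * effect here, so it is omitted */
	bool mapping_has_pages(const struct address_space *mapping);

	/* definition - the extra const only constrains the local
	 * parameter variable inside the function body */
	bool mapping_has_pages(const struct address_space *const mapping)
	{
		return !xa_empty(&mapping->i_pages);
	}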
Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by Lorenzo Stoakes 3 months, 1 week ago
On Mon, Sep 01, 2025 at 04:50:50PM +0200, Max Kellermann wrote:
> On Mon, Sep 1, 2025 at 4:25 PM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
> > 1. (most useful) Const pointer (const <type> *<param>) means that the dereffed
> >    value is const, so *<param> = <val> or <param>-><field> = <val> are prohibited.
>
> Only this was what my initial patch was about.

Right agreed then.

>
> > 2. (less useful) We can't modify the actual pointer value either, so
> >    e.g. <param> = <new param> is prohibited.
>
> This wasn't my idea, it was Andrew Morton's idea, supported by Yuanchu Xie:
>  https://lore.kernel.org/lkml/CAJj2-QHVC0QW_4X95LLAnM=1g6apH==-OXZu65SVeBj0tSUcBg@mail.gmail.com/

Andrew said:

"Not that I'm suggesting that someone go in and make this change."

And Yuanchu said:

"Longer function readability would benefit from that, but it's IMO infeasible to
do so everywhere."

(he also mentions it'd be good if gcc could warn on it).

So this isn't quite true actually.

Let's please just drop this, sorry.

The noise for multiple const params is too much; we can do it on a
case-by-case basis going forward for larger functions.

> You know that because you participated in the discussion. In that

A point of advice given how this series has gone so far - please don't say
things like this :)

I have an extremely heavy review load, and often work 12-hour days. I don't
necessarily see everything, though I try to.

I had missed this.

Just add a little civility and empathy; it really helps, thanks.

> thread, nobody objected, so I took the time and adjusted all of my
> patches.
> There is some value, but of course it's very small.

Yeah sorry that you had extra work as a result.

But let's please drop this.

>
> Note that I added the value-level "const" only to the implementation,
> never to prototypes, because it would have no effect there.

Right.

Thanks, Lorenzo
Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by Vlastimil Babka 3 months, 1 week ago
On 9/1/25 17:14, Lorenzo Stoakes wrote:
> On Mon, Sep 01, 2025 at 04:50:50PM +0200, Max Kellermann wrote:
>> On Mon, Sep 1, 2025 at 4:25 PM Lorenzo Stoakes
>> <lorenzo.stoakes@oracle.com> wrote:
>> > 1. (most useful) Const pointer (const <type> *<param>) means that the dereffed
>> >    value is const, so *<param> = <val> or <param>-><field> = <val> are prohibited.
>>
>> Only this was what my initial patch was about.
> 
> Right agreed then.
> 
>>
>> > 2. (less useful) We can't modify the actual pointer value either, so
>> >    e.g. <param> = <new param> is prohibited.
>>
>> This wasn't my idea, it was Andrew Morton's idea, supported by Yuanchu Xie:
>>  https://lore.kernel.org/lkml/CAJj2-QHVC0QW_4X95LLAnM=1g6apH==-OXZu65SVeBj0tSUcBg@mail.gmail.com/
> 
> Andrew said:
> 
> "Not that I'm suggesting that someone go in and make this change."
> 
> And Yuanchu said:
> 
> "Longer function readability would benefit from that, but it's IMO infeasible to
> do so everywhere."
> 
> (he also mentions it'd be good if gcc could wran on it).
>
> So this isn't quite true actually.

I understood it the same, that it would be nice if gcc treated incoming
params (i.e. pointers, not pointed-to values) as const and warned otherwise,
but not suggesting we should start doing that manually.

I personally agree that adding those extra "const" is of little value and
makes the function definition lines longer and harder to read and so would
rather not add those.

I mean we could first collectively decide (and that's not a review
half-suggesting we do them) that we want them, and document that, but AFAIK
that's not the case yet. While there's already an agreement that const
pointed-to values are a good thing and nobody objects to that.

Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by David Hildenbrand 3 months, 1 week ago
On 01.09.25 17:47, Vlastimil Babka wrote:
> On 9/1/25 17:14, Lorenzo Stoakes wrote:
>> On Mon, Sep 01, 2025 at 04:50:50PM +0200, Max Kellermann wrote:
>>> On Mon, Sep 1, 2025 at 4:25 PM Lorenzo Stoakes
>>> <lorenzo.stoakes@oracle.com> wrote:
>>>> 1. (most useful) Const pointer (const <type> *<param>) means that the dereffed
>>>>     value is const, so *<param> = <val> or <param>-><field> = <val> are prohibited.
>>>
>>> Only this was what my initial patch was about.
>>
>> Right agreed then.
>>
>>>
>>>> 2. (less useful) We can't modify the actual pointer value either, so
>>>>     e.g. <param> = <new param> is prohibited.
>>>
>>> This wasn't my idea, it was Andrew Morton's idea, supported by Yuanchu Xie:
>>>   https://lore.kernel.org/lkml/CAJj2-QHVC0QW_4X95LLAnM=1g6apH==-OXZu65SVeBj0tSUcBg@mail.gmail.com/
>>
>> Andrew said:
>>
>> "Not that I'm suggesting that someone go in and make this change."
>>
>> And Yuanchu said:
>>
>> "Longer function readability would benefit from that, but it's IMO infeasible to
>> do so everywhere."
>>
>> (he also mentions it'd be good if gcc could warn on it).
>>
>> So this isn't quite true actually.
> 
> I understood it the same, that it would be nice if gcc treated incoming
> params (i.e. pointers, not pointed-to values) as const and warn otherwise,
> but not suggesting we should start doing that manually.
> 
> I personally agree that adding those extra "const" is of little value and
> makes the function definition lines longer and harder to read and so would
> rather not add those.
> 
> I mean we could first collectively decide (and that's not a review
> half-suggesting we do them) that we want them, and document that, but AFAIK
> that's not the case yet. While there's already an agreement that const
> pointed-to values is a good thing and nobody objects that.

Yeah, and as discussed elsewhere in this series, it would also have to be
clarified how to deal with the "*const" (or const values in general) with
function declarations vs. definitions. I don't think we really have a
written-down rule for that yet.

-- 
Cheers

David / dhildenb

Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by Lorenzo Stoakes 3 months, 1 week ago
On Mon, Sep 01, 2025 at 05:52:47PM +0200, David Hildenbrand wrote:
> On 01.09.25 17:47, Vlastimil Babka wrote:
> > On 9/1/25 17:14, Lorenzo Stoakes wrote:
> > > On Mon, Sep 01, 2025 at 04:50:50PM +0200, Max Kellermann wrote:
> > > > On Mon, Sep 1, 2025 at 4:25 PM Lorenzo Stoakes
> > > > <lorenzo.stoakes@oracle.com> wrote:
> > > > > 1. (most useful) Const pointer (const <type> *<param>) means that the dereffed
> > > > >     value is const, so *<param> = <val> or <param>-><field> = <val> are prohibited.
> > > >
> > > > Only this was what my initial patch was about.
> > >
> > > Right agreed then.
> > >
> > > >
> > > > > 2. (less useful) We can't modify the actual pointer value either, so
> > > > >     e.g. <param> = <new param> is prohibited.
> > > >
> > > > This wasn't my idea, it was Andrew Morton's idea, supported by Yuanchu Xie:
> > > >   https://lore.kernel.org/lkml/CAJj2-QHVC0QW_4X95LLAnM=1g6apH==-OXZu65SVeBj0tSUcBg@mail.gmail.com/
> > >
> > > Andrew said:
> > >
> > > "Not that I'm suggesting that someone go in and make this change."
> > >
> > > And Yuanchu said:
> > >
> > > "Longer function readability would benefit from that, but it's IMO infeasible to
> > > do so everywhere."
> > >
> > > (he also mentions it'd be good if gcc could warn on it).
> > >
> > > So this isn't quite true actually.
> >
> > I understood it the same, that it would be nice if gcc treated incoming
> > params (i.e. pointers, not pointed-to values) as const and warn otherwise,
> > but not suggesting we should start doing that manually.
> >
> > I personally agree that adding those extra "const" is of little value and
> > makes the function definition lines longer and harder to read and so would
> > rather not add those.
> >
> > I mean we could first collectively decide (and that's not a review
> > half-suggesting we do them) that we want them, and document that, but AFAIK
> > that's not the case yet. While there's already an agreement that const
> > pointed-to values is a good thing and nobody objects that.
>
> Yeah, and discussed elsewhere in this series, it would also have to be
> clarified how to deal with the *const" (or const values in general) with
> function declaration vs. definition. I don't think we really have
> written-down rule for that yet.

For this series the consensus is clear that we should eliminate these and revert
to const pointed-to values (1) only.
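
I.e. for the respin, keep just the pointed-to const (sketch):

	static inline bool mapping_empty(const struct address_space *mapping)
	{
		return xa_empty(&mapping->i_pages);
	}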

We can determine how we do this in future re: const actual pointers (2), but
this series isn't the place.

So Max - can you respin with the (2) const-ification removed please.

Thanks.
Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by Lorenzo Stoakes 3 months, 1 week ago
On Mon, Sep 01, 2025 at 02:30:18PM +0200, Max Kellermann wrote:
> We select certain test functions which either invoke each other,
> functions that are already const-ified, or no further functions.
>
> It is therefore relatively trivial to const-ify them, which
> provides a basis for further const-ification further up the call
> stack.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>

LGTM, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

> ---
>  include/linux/pagemap.h | 57 +++++++++++++++++++++--------------------
>  1 file changed, 29 insertions(+), 28 deletions(-)
>
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index a3e16d74792f..1d35f9e1416e 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
>  	return filemap_write_and_wait(inode->i_mapping);
>  }
>
> -static inline bool mapping_empty(struct address_space *mapping)
> +static inline bool mapping_empty(const struct address_space *const mapping)
>  {
>  	return xa_empty(&mapping->i_pages);
>  }
> @@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
>   * refcount and the referenced bit, which will be elevated or set in
>   * the process of adding new cache pages to an inode.
>   */
> -static inline bool mapping_shrinkable(struct address_space *mapping)
> +static inline bool mapping_shrinkable(const struct address_space *const mapping)
>  {
>  	void *head;
>
> @@ -267,7 +267,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
>  	clear_bit(AS_UNEVICTABLE, &mapping->flags);
>  }
>
> -static inline bool mapping_unevictable(struct address_space *mapping)
> +static inline bool mapping_unevictable(const struct address_space *const mapping)
>  {
>  	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
>  }
> @@ -277,7 +277,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
>  	set_bit(AS_EXITING, &mapping->flags);
>  }
>
> -static inline int mapping_exiting(struct address_space *mapping)
> +static inline int mapping_exiting(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_EXITING, &mapping->flags);
>  }
> @@ -287,7 +287,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
>  	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
>  }
>
> -static inline int mapping_use_writeback_tags(struct address_space *mapping)
> +static inline int mapping_use_writeback_tags(const struct address_space *const mapping)
>  {
>  	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
>  }
> @@ -333,7 +333,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
>  	set_bit(AS_INACCESSIBLE, &mapping->flags);
>  }
>
> -static inline bool mapping_inaccessible(struct address_space *mapping)
> +static inline bool mapping_inaccessible(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_INACCESSIBLE, &mapping->flags);
>  }
> @@ -343,18 +343,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
>  	set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
>  }
>
> -static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
> +static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *const mapping)
>  {
>  	return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
>  }
>
> -static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
> +static inline gfp_t mapping_gfp_mask(const struct address_space *const mapping)
>  {
>  	return mapping->gfp_mask;
>  }
>
>  /* Restricts the given gfp_mask to what the mapping allows. */
> -static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
> +static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
>  		gfp_t gfp_mask)
>  {
>  	return mapping_gfp_mask(mapping) & gfp_mask;
> @@ -477,13 +477,13 @@ mapping_min_folio_order(const struct address_space *mapping)
>  }
>
>  static inline unsigned long
> -mapping_min_folio_nrpages(struct address_space *mapping)
> +mapping_min_folio_nrpages(const struct address_space *const mapping)
>  {
>  	return 1UL << mapping_min_folio_order(mapping);
>  }
>
>  static inline unsigned long
> -mapping_min_folio_nrbytes(struct address_space *mapping)
> +mapping_min_folio_nrbytes(const struct address_space *const mapping)
>  {
>  	return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
>  }
> @@ -497,7 +497,7 @@ mapping_min_folio_nrbytes(struct address_space *mapping)
>   * new folio to the page cache and need to know what index to give it,
>   * call this function.
>   */
> -static inline pgoff_t mapping_align_index(struct address_space *mapping,
> +static inline pgoff_t mapping_align_index(const struct address_space *const mapping,
>  					  pgoff_t index)
>  {
>  	return round_down(index, mapping_min_folio_nrpages(mapping));
> @@ -507,7 +507,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
>   * Large folio support currently depends on THP.  These dependencies are
>   * being worked on but are not yet fixed.
>   */
> -static inline bool mapping_large_folio_support(struct address_space *mapping)
> +static inline bool mapping_large_folio_support(const struct address_space *mapping)
>  {
>  	/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
>  	VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
> @@ -522,7 +522,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
>  	return PAGE_SIZE << mapping_max_folio_order(mapping);
>  }
>
> -static inline int filemap_nr_thps(struct address_space *mapping)
> +static inline int filemap_nr_thps(const struct address_space *const mapping)
>  {
>  #ifdef CONFIG_READ_ONLY_THP_FOR_FS
>  	return atomic_read(&mapping->nr_thps);
> @@ -936,7 +936,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
>   *
>   * Return: The index of the folio which follows this folio in the file.
>   */
> -static inline pgoff_t folio_next_index(struct folio *folio)
> +static inline pgoff_t folio_next_index(const struct folio *const folio)
>  {
>  	return folio->index + folio_nr_pages(folio);
>  }
> @@ -965,7 +965,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
>   * e.g., shmem did not move this folio to the swap cache.
>   * Return: true or false.
>   */
> -static inline bool folio_contains(struct folio *folio, pgoff_t index)
> +static inline bool folio_contains(const struct folio *const folio, pgoff_t index)
>  {
>  	VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
>  	return index - folio->index < folio_nr_pages(folio);
> @@ -1042,13 +1042,13 @@ static inline loff_t page_offset(struct page *page)
>  /*
>   * Get the offset in PAGE_SIZE (even for hugetlb folios).
>   */
> -static inline pgoff_t folio_pgoff(struct folio *folio)
> +static inline pgoff_t folio_pgoff(const struct folio *const folio)
>  {
>  	return folio->index;
>  }
>
> -static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
> -					unsigned long address)
> +static inline pgoff_t linear_page_index(const struct vm_area_struct *const vma,
> +					const unsigned long address)
>  {
>  	pgoff_t pgoff;
>  	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
> @@ -1468,7 +1468,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
>   * readahead_pos - The byte offset into the file of this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline loff_t readahead_pos(struct readahead_control *rac)
> +static inline loff_t readahead_pos(const struct readahead_control *const rac)
>  {
>  	return (loff_t)rac->_index * PAGE_SIZE;
>  }
> @@ -1477,7 +1477,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
>   * readahead_length - The number of bytes in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline size_t readahead_length(struct readahead_control *rac)
> +static inline size_t readahead_length(const struct readahead_control *const rac)
>  {
>  	return rac->_nr_pages * PAGE_SIZE;
>  }
> @@ -1486,7 +1486,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
>   * readahead_index - The index of the first page in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline pgoff_t readahead_index(struct readahead_control *rac)
> +static inline pgoff_t readahead_index(const struct readahead_control *const rac)
>  {
>  	return rac->_index;
>  }
> @@ -1495,7 +1495,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
>   * readahead_count - The number of pages in this readahead request.
>   * @rac: The readahead request.
>   */
> -static inline unsigned int readahead_count(struct readahead_control *rac)
> +static inline unsigned int readahead_count(const struct readahead_control *const rac)
>  {
>  	return rac->_nr_pages;
>  }
> @@ -1504,12 +1504,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
>   * readahead_batch_length - The number of bytes in the current batch.
>   * @rac: The readahead request.
>   */
> -static inline size_t readahead_batch_length(struct readahead_control *rac)
> +static inline size_t readahead_batch_length(const struct readahead_control *const rac)
>  {
>  	return rac->_batch_count * PAGE_SIZE;
>  }
>
> -static inline unsigned long dir_pages(struct inode *inode)
> +static inline unsigned long dir_pages(const struct inode *const inode)
>  {
>  	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
>  			       PAGE_SHIFT;
> @@ -1523,8 +1523,8 @@ static inline unsigned long dir_pages(struct inode *inode)
>   * Return: the number of bytes in the folio up to EOF,
>   * or -EFAULT if the folio was truncated.
>   */
> -static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
> -					      struct inode *inode)
> +static inline ssize_t folio_mkwrite_check_truncate(const struct folio *const folio,
> +						   const struct inode *const inode)
>  {
>  	loff_t size = i_size_read(inode);
>  	pgoff_t index = size >> PAGE_SHIFT;
> @@ -1555,7 +1555,8 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
>   * Return: The number of filesystem blocks covered by this folio.
>   */
>  static inline
> -unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
> +unsigned int i_blocks_per_folio(const struct inode *const inode,
> +				const struct folio *const folio)
>  {
>  	return folio_size(folio) >> inode->i_blkbits;
>  }
> --
> 2.47.2
>
Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by David Hildenbrand 3 months, 1 week ago
On 01.09.25 14:30, Max Kellermann wrote:
> We select certain test functions which either invoke each other,
> functions that are already const-ified, or no further functions.
> 
> It is therefore relatively trivial to const-ify them, which
> provides a basis for further const-ification further up the call
> stack.
> 
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> ---

Not only test functions but also some other "getters" in there.

Acked-by: David Hildenbrand <david@redhat.com>

-- 
Cheers

David / dhildenb
Re: [PATCH v5 02/12] mm: constify pagemap related test functions for improved const-correctness
Posted by Max Kellermann 3 months, 1 week ago
On Mon, Sep 1, 2025 at 3:49 PM David Hildenbrand <david@redhat.com> wrote:
> Not only test functions but also some other "getters" in there.

Oh, I thought "getter" and "test function" meant the same thing to you. I'll amend the text.