We select certain test functions which invoke only each other,
functions that are already const-ified, or no further functions.
It is therefore relatively trivial to const-ify them, which
provides a basis for further const-ification higher up the call
stack.
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
include/linux/mmzone.h | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f3272ef5131b..9a25fb1ade82 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1104,7 +1104,7 @@ static inline unsigned long promo_wmark_pages(const struct zone *z)
return wmark_pages(z, WMARK_PROMO);
}
-static inline unsigned long zone_managed_pages(struct zone *zone)
+static inline unsigned long zone_managed_pages(const struct zone *const zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
@@ -1128,12 +1128,12 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
-static inline bool zone_is_initialized(struct zone *zone)
+static inline bool zone_is_initialized(const struct zone *const zone)
{
return zone->initialized;
}
-static inline bool zone_is_empty(struct zone *zone)
+static inline bool zone_is_empty(const struct zone *const zone)
{
return zone->spanned_pages == 0;
}
@@ -1273,7 +1273,7 @@ static inline bool folio_is_zone_movable(const struct folio *folio)
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
*/
-static inline bool zone_intersects(struct zone *zone,
+static inline bool zone_intersects(const struct zone *const zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
@@ -1581,12 +1581,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *const zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *const zone)
{
return false;
}
@@ -1598,19 +1598,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
* populated_zone(). If the whole zone is reserved then we can easily
* end up with populated_zone() && !managed_zone().
*/
-static inline bool managed_zone(struct zone *zone)
+static inline bool managed_zone(const struct zone *const zone)
{
return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
-static inline bool populated_zone(struct zone *zone)
+static inline bool populated_zone(const struct zone *const zone)
{
return zone->present_pages;
}
#ifdef CONFIG_NUMA
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *const zone)
{
return zone->node;
}
@@ -1620,7 +1620,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
zone->node = nid;
}
#else
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *const zone)
{
return 0;
}
@@ -1647,7 +1647,7 @@ static inline int is_highmem_idx(enum zone_type idx)
* @zone: pointer to struct zone variable
* Return: 1 for a highmem zone, 0 otherwise
*/
-static inline int is_highmem(struct zone *zone)
+static inline int is_highmem(const struct zone *const zone)
{
return is_highmem_idx(zone_idx(zone));
}
@@ -1713,12 +1713,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
return zoneref->zone;
}
-static inline int zonelist_zone_idx(struct zoneref *zoneref)
+static inline int zonelist_zone_idx(const struct zoneref *const zoneref)
{
return zoneref->zone_idx;
}
-static inline int zonelist_node_idx(struct zoneref *zoneref)
+static inline int zonelist_node_idx(const struct zoneref *const zoneref)
{
return zone_to_nid(zoneref->zone);
}
@@ -2021,7 +2021,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int present_section(struct mem_section *section)
+static inline int present_section(const struct mem_section *const section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
@@ -2031,12 +2031,12 @@ static inline int present_section_nr(unsigned long nr)
return present_section(__nr_to_section(nr));
}
-static inline int valid_section(struct mem_section *section)
+static inline int valid_section(const struct mem_section *const section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
-static inline int early_section(struct mem_section *section)
+static inline int early_section(const struct mem_section *const section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
@@ -2046,27 +2046,27 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-static inline int online_section(struct mem_section *section)
+static inline int online_section(const struct mem_section *const section)
{
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
#ifdef CONFIG_ZONE_DEVICE
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *const section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
#else
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *const section)
{
return 0;
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *const section)
{
return (section &&
(section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
@@ -2076,7 +2076,7 @@ void sparse_vmemmap_init_nid_early(int nid);
void sparse_vmemmap_init_nid_late(int nid);
#else
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *const section)
{
return 0;
}
--
2.47.2
On Mon, Sep 01, 2025 at 02:30:19PM +0200, Max Kellermann wrote:
> We select certain test functions which either invoke each other,
> functions that are already const-ified, or no further functions.
>
> It is therefore relatively trivial to const-ify them, which
> provides a basis for further const-ification further up the call
> stack.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
On the basis that we figure out whether we want the "const <type> *const <param>"
thing or not, this otherwise LGTM so:
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> ---
> include/linux/mmzone.h | 42 +++++++++++++++++++++---------------------
> 1 file changed, 21 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index f3272ef5131b..9a25fb1ade82 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1104,7 +1104,7 @@ static inline unsigned long promo_wmark_pages(const struct zone *z)
> return wmark_pages(z, WMARK_PROMO);
> }
>
> -static inline unsigned long zone_managed_pages(struct zone *zone)
> +static inline unsigned long zone_managed_pages(const struct zone *const zone)
> {
> return (unsigned long)atomic_long_read(&zone->managed_pages);
> }
> @@ -1128,12 +1128,12 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
> return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
> }
>
> -static inline bool zone_is_initialized(struct zone *zone)
> +static inline bool zone_is_initialized(const struct zone *const zone)
> {
> return zone->initialized;
> }
>
> -static inline bool zone_is_empty(struct zone *zone)
> +static inline bool zone_is_empty(const struct zone *const zone)
> {
> return zone->spanned_pages == 0;
> }
> @@ -1273,7 +1273,7 @@ static inline bool folio_is_zone_movable(const struct folio *folio)
> * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
> * intersection with the given zone
> */
> -static inline bool zone_intersects(struct zone *zone,
> +static inline bool zone_intersects(const struct zone *const zone,
> unsigned long start_pfn, unsigned long nr_pages)
> {
> if (zone_is_empty(zone))
> @@ -1581,12 +1581,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
> #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
>
> #ifdef CONFIG_ZONE_DEVICE
> -static inline bool zone_is_zone_device(struct zone *zone)
> +static inline bool zone_is_zone_device(const struct zone *const zone)
> {
> return zone_idx(zone) == ZONE_DEVICE;
> }
> #else
> -static inline bool zone_is_zone_device(struct zone *zone)
> +static inline bool zone_is_zone_device(const struct zone *const zone)
> {
> return false;
> }
> @@ -1598,19 +1598,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
> * populated_zone(). If the whole zone is reserved then we can easily
> * end up with populated_zone() && !managed_zone().
> */
> -static inline bool managed_zone(struct zone *zone)
> +static inline bool managed_zone(const struct zone *const zone)
> {
> return zone_managed_pages(zone);
> }
>
> /* Returns true if a zone has memory */
> -static inline bool populated_zone(struct zone *zone)
> +static inline bool populated_zone(const struct zone *const zone)
> {
> return zone->present_pages;
> }
>
> #ifdef CONFIG_NUMA
> -static inline int zone_to_nid(struct zone *zone)
> +static inline int zone_to_nid(const struct zone *const zone)
> {
> return zone->node;
> }
> @@ -1620,7 +1620,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
> zone->node = nid;
> }
> #else
> -static inline int zone_to_nid(struct zone *zone)
> +static inline int zone_to_nid(const struct zone *zone)
> {
> return 0;
> }
> @@ -1647,7 +1647,7 @@ static inline int is_highmem_idx(enum zone_type idx)
> * @zone: pointer to struct zone variable
> * Return: 1 for a highmem zone, 0 otherwise
> */
> -static inline int is_highmem(struct zone *zone)
> +static inline int is_highmem(const struct zone *const zone)
> {
> return is_highmem_idx(zone_idx(zone));
> }
> @@ -1713,12 +1713,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
> return zoneref->zone;
> }
>
> -static inline int zonelist_zone_idx(struct zoneref *zoneref)
> +static inline int zonelist_zone_idx(const struct zoneref *const zoneref)
> {
> return zoneref->zone_idx;
> }
>
> -static inline int zonelist_node_idx(struct zoneref *zoneref)
> +static inline int zonelist_node_idx(const struct zoneref *const zoneref)
> {
> return zone_to_nid(zoneref->zone);
> }
> @@ -2021,7 +2021,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
> return (struct page *)map;
> }
>
> -static inline int present_section(struct mem_section *section)
> +static inline int present_section(const struct mem_section *const section)
> {
> return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
> }
> @@ -2031,12 +2031,12 @@ static inline int present_section_nr(unsigned long nr)
> return present_section(__nr_to_section(nr));
> }
>
> -static inline int valid_section(struct mem_section *section)
> +static inline int valid_section(const struct mem_section *const section)
> {
> return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
> }
>
> -static inline int early_section(struct mem_section *section)
> +static inline int early_section(const struct mem_section *const section)
> {
> return (section && (section->section_mem_map & SECTION_IS_EARLY));
> }
> @@ -2046,27 +2046,27 @@ static inline int valid_section_nr(unsigned long nr)
> return valid_section(__nr_to_section(nr));
> }
>
> -static inline int online_section(struct mem_section *section)
> +static inline int online_section(const struct mem_section *const section)
> {
> return (section && (section->section_mem_map & SECTION_IS_ONLINE));
> }
>
> #ifdef CONFIG_ZONE_DEVICE
> -static inline int online_device_section(struct mem_section *section)
> +static inline int online_device_section(const struct mem_section *const section)
> {
> unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
>
> return section && ((section->section_mem_map & flags) == flags);
> }
> #else
> -static inline int online_device_section(struct mem_section *section)
> +static inline int online_device_section(const struct mem_section *const section)
> {
> return 0;
> }
> #endif
>
> #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
> -static inline int preinited_vmemmap_section(struct mem_section *section)
> +static inline int preinited_vmemmap_section(const struct mem_section *const section)
> {
> return (section &&
> (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
> @@ -2076,7 +2076,7 @@ void sparse_vmemmap_init_nid_early(int nid);
> void sparse_vmemmap_init_nid_late(int nid);
>
> #else
> -static inline int preinited_vmemmap_section(struct mem_section *section)
> +static inline int preinited_vmemmap_section(const struct mem_section *const section)
> {
> return 0;
> }
> --
> 2.47.2
>
On 01.09.25 14:30, Max Kellermann wrote:
> We select certain test functions which either invoke each other,
> functions that are already const-ified, or no further functions.
>
> It is therefore relatively trivial to const-ify them, which
> provides a basis for further const-ification further up the call
> stack.
>
> Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
> Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
> ---

Also some getters hiding in between the test functions.

Acked-by: David Hildenbrand <david@redhat.com>

--
Cheers

David / dhildenb
© 2016 - 2025 Red Hat, Inc.