On Sat, Nov 15, 2025 at 06:33:59PM -0500, Pasha Tatashin wrote:
> From: Pratyush Yadav <ptyadav@amazon.de>
>
> shmem_inode_acct_blocks(), shmem_recalc_inode(), and
> shmem_add_to_page_cache() are used by shmem_alloc_and_add_folio(). This
> functionality will also be used in the future by Live Update
> Orchestrator (LUO) to recreate memfd files after a live update.
I'd rephrase this a bit to say that it will be used by the memfd integration
into LUO, to emphasize that this stays inside mm.
Other than that,
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
>
> Signed-off-by: Pratyush Yadav <ptyadav@amazon.de>
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> ---
> mm/internal.h | 6 ++++++
> mm/shmem.c | 10 +++++-----
> 2 files changed, 11 insertions(+), 5 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 1561fc2ff5b8..4ba155524f80 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1562,6 +1562,12 @@ void __meminit __init_page_from_nid(unsigned long pfn, int nid);
> unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
> int priority);
>
> +int shmem_add_to_page_cache(struct folio *folio,
> + struct address_space *mapping,
> + pgoff_t index, void *expected, gfp_t gfp);
> +int shmem_inode_acct_blocks(struct inode *inode, long pages);
> +bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped);
> +
> #ifdef CONFIG_SHRINKER_DEBUG
> static inline __printf(2, 0) int shrinker_debugfs_name_alloc(
> struct shrinker *shrinker, const char *fmt, va_list ap)
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 05c3db840257..c3dc4af59c14 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -219,7 +219,7 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
> vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
> }
>
> -static int shmem_inode_acct_blocks(struct inode *inode, long pages)
> +int shmem_inode_acct_blocks(struct inode *inode, long pages)
> {
> struct shmem_inode_info *info = SHMEM_I(inode);
> struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
> @@ -435,7 +435,7 @@ static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
> *
> * Return: true if swapped was incremented from 0, for shmem_writeout().
> */
> -static bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
> +bool shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
> {
> struct shmem_inode_info *info = SHMEM_I(inode);
> bool first_swapped = false;
> @@ -861,9 +861,9 @@ static void shmem_update_stats(struct folio *folio, int nr_pages)
> /*
> * Somewhat like filemap_add_folio, but error if expected item has gone.
> */
> -static int shmem_add_to_page_cache(struct folio *folio,
> - struct address_space *mapping,
> - pgoff_t index, void *expected, gfp_t gfp)
> +int shmem_add_to_page_cache(struct folio *folio,
> + struct address_space *mapping,
> + pgoff_t index, void *expected, gfp_t gfp)
> {
> XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
> unsigned long nr = folio_nr_pages(folio);
> --
> 2.52.0.rc1.455.g30608eb744-goog
>
--
Sincerely yours,
Mike.