From: Kairui Song <kasong@tencent.com>
With the slot cache gone, clean up the allocation helpers even more.
folio_alloc_swap() will be the only entry point for allocating swap
and adding the folio to the swap cache (except for suspend), making
it the opposite of folio_free_swap().
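For illustration, the caller-side shape before and after this patch
(error paths trimmed, flags as used in the shmem_writepage() hunk below):

	/* Before: two steps, caller cleans up on failure */
	swp_entry_t entry = folio_alloc_swap(folio);
	if (!entry.val)
		goto redirty;
	if (add_to_swap_cache(folio, entry,
			      __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			      NULL)) {
		put_swap_folio(folio, entry);
		goto redirty;
	}

	/* After: one call allocates the entry and adds the folio to
	 * the swap cache; on success the entry is in folio->swap */
	if (!folio_alloc_swap(folio,
			      __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN))
		goto redirty;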
Signed-off-by: Kairui Song <kasong@tencent.com>
---
include/linux/swap.h | 8 ++--
mm/shmem.c | 21 +++------
mm/swap.h | 6 ---
mm/swap_state.c | 57 ----------------------
mm/swapfile.c | 110 ++++++++++++++++++++++++++++---------------
mm/vmscan.c | 16 ++++++-
6 files changed, 94 insertions(+), 124 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 456833705ea0..e799e965dac8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -478,7 +478,7 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-swp_entry_t folio_alloc_swap(struct folio *folio);
+bool folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
@@ -587,11 +587,9 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline swp_entry_t folio_alloc_swap(struct folio *folio)
+static bool folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
{
- swp_entry_t entry;
- entry.val = 0;
- return entry;
+ return false;
}
static inline bool folio_free_swap(struct folio *folio)
diff --git a/mm/shmem.c b/mm/shmem.c
index b35ba250c53d..2aa206b52ff2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1546,7 +1546,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
struct inode *inode = mapping->host;
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- swp_entry_t swap;
pgoff_t index;
int nr_pages;
bool split = false;
@@ -1628,14 +1627,6 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
folio_mark_uptodate(folio);
}
- swap = folio_alloc_swap(folio);
- if (!swap.val) {
- if (nr_pages > 1)
- goto try_split;
-
- goto redirty;
- }
-
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
* if it's not already there. Do it now before the folio is
@@ -1648,20 +1639,20 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (list_empty(&info->swaplist))
list_add(&info->swaplist, &shmem_swaplist);
- if (add_to_swap_cache(folio, swap,
- __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
- NULL) == 0) {
+ if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
shmem_recalc_inode(inode, 0, nr_pages);
- swap_shmem_alloc(swap, nr_pages);
- shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
+ swap_shmem_alloc(folio->swap, nr_pages);
+ shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(folio_mapped(folio));
return swap_writepage(&folio->page, wbc);
}
+ list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
- put_swap_folio(folio, swap);
+ if (nr_pages > 1)
+ goto try_split;
redirty:
folio_mark_dirty(folio);
if (wbc->for_reclaim)
diff --git a/mm/swap.h b/mm/swap.h
index ad2f121de970..0abb68091b4f 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -50,7 +50,6 @@ static inline pgoff_t swap_cache_index(swp_entry_t entry)
}
void show_swap_cache_info(void);
-bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
gfp_t gfp, void **shadowp);
@@ -163,11 +162,6 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
return filemap_get_folio(mapping, index);
}
-static inline bool add_to_swap(struct folio *folio)
-{
- return false;
-}
-
static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2b5744e211cd..68fd981b514f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -166,63 +166,6 @@ void __delete_from_swap_cache(struct folio *folio,
__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}
-/**
- * add_to_swap - allocate swap space for a folio
- * @folio: folio we want to move to swap
- *
- * Allocate swap space for the folio and add the folio to the
- * swap cache.
- *
- * Context: Caller needs to hold the folio lock.
- * Return: Whether the folio was added to the swap cache.
- */
-bool add_to_swap(struct folio *folio)
-{
- swp_entry_t entry;
- int err;
-
- VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
-
- entry = folio_alloc_swap(folio);
- if (!entry.val)
- return false;
-
- /*
- * XArray node allocations from PF_MEMALLOC contexts could
- * completely exhaust the page allocator. __GFP_NOMEMALLOC
- * stops emergency reserves from being allocated.
- *
- * TODO: this could cause a theoretical memory reclaim
- * deadlock in the swap out path.
- */
- /*
- * Add it to the swap cache.
- */
- err = add_to_swap_cache(folio, entry,
- __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
- if (err)
- goto fail;
- /*
- * Normally the folio will be dirtied in unmap because its
- * pte should be dirty. A special case is MADV_FREE page. The
- * page's pte could have dirty bit cleared but the folio's
- * SwapBacked flag is still set because clearing the dirty bit
- * and SwapBacked flag has no lock protected. For such folio,
- * unmap will not set dirty bit for it, so folio reclaim will
- * not write the folio out. This can cause data corruption when
- * the folio is swapped in later. Always setting the dirty flag
- * for the folio solves the problem.
- */
- folio_mark_dirty(folio);
-
- return true;
-
-fail:
- put_swap_folio(folio, entry);
- return false;
-}
-
/*
* This must be called only on folios that have
* been verified to be in the swap cache and locked.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 66c8869ef346..8449bd703bd8 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1181,9 +1181,9 @@ static bool get_swap_device_info(struct swap_info_struct *si)
* Fast path try to get swap entries with specified order from current
* CPU's swap entry pool (a cluster).
*/
-static int swap_alloc_fast(swp_entry_t *entry,
- unsigned char usage,
- int order)
+static bool swap_alloc_fast(swp_entry_t *entry,
+ unsigned char usage,
+ int order)
{
struct swap_cluster_info *ci;
struct swap_info_struct *si;
@@ -1203,47 +1203,31 @@ static int swap_alloc_fast(swp_entry_t *entry,
return !!found;
}
-swp_entry_t folio_alloc_swap(struct folio *folio)
+/* Rotate the device and switch to a new cluster */
+static bool swap_alloc_rotate(swp_entry_t *entry,
+ unsigned char usage,
+ int order)
{
- unsigned int order = folio_order(folio);
- unsigned int size = 1 << order;
- struct swap_info_struct *si, *next;
- swp_entry_t entry = {};
- unsigned long offset;
int node;
+ unsigned long offset;
+ struct swap_info_struct *si, *next;
- if (order) {
- /*
- * Should not even be attempting large allocations when huge
- * page swap is disabled. Warn and fail the allocation.
- */
- if (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER) {
- VM_WARN_ON_ONCE(1);
- return entry;
- }
- }
-
- /* Fast path using percpu cluster */
- local_lock(&percpu_swap_cluster.lock);
- if (swap_alloc_fast(&entry, SWAP_HAS_CACHE, order))
- goto out_alloced;
-
- /* Rotate the device and switch to a new cluster */
+ node = numa_node_id();
spin_lock(&swap_avail_lock);
start_over:
- node = numa_node_id();
plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
+ /* Rotate the device and switch to a new cluster */
plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
spin_unlock(&swap_avail_lock);
if (get_swap_device_info(si)) {
offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE);
put_swap_device(si);
if (offset) {
- entry = swp_entry(si->type, offset);
- goto out_alloced;
+ *entry = swp_entry(si->type, offset);
+ return true;
}
if (order)
- goto out_failed;
+ return false;
}
spin_lock(&swap_avail_lock);
@@ -1262,20 +1246,68 @@ swp_entry_t folio_alloc_swap(struct folio *folio)
goto start_over;
}
spin_unlock(&swap_avail_lock);
-out_failed:
+ return false;
+}
+
+/**
+ * folio_alloc_swap - allocate swap space for a folio
+ * @folio: folio we want to move to swap
+ * @gfp: gfp mask for shadow nodes
+ *
+ * Allocate swap space for the folio and add the folio to the
+ * swap cache.
+ *
+ * Context: Caller needs to hold the folio lock.
+ * Return: Whether the folio was added to the swap cache.
+ */
+bool folio_alloc_swap(struct folio *folio, gfp_t gfp)
+{
+ unsigned int order = folio_order(folio);
+ unsigned int size = 1 << order;
+ swp_entry_t entry = {};
+
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
+
+ /*
+ * Should not even be attempting large allocations when huge
+ * page swap is disabled. Warn and fail the allocation.
+ */
+ if (order && (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER)) {
+ VM_WARN_ON_ONCE(1);
+ return false;
+ }
+
+ local_lock(&percpu_swap_cluster.lock);
+ if (swap_alloc_fast(&entry, SWAP_HAS_CACHE, order))
+ goto out_alloced;
+ if (swap_alloc_rotate(&entry, SWAP_HAS_CACHE, order))
+ goto out_alloced;
local_unlock(&percpu_swap_cluster.lock);
- return entry;
+ return false;
out_alloced:
local_unlock(&percpu_swap_cluster.lock);
- if (mem_cgroup_try_charge_swap(folio, entry)) {
- put_swap_folio(folio, entry);
- entry.val = 0;
- } else {
- atomic_long_sub(size, &nr_swap_pages);
- }
+ if (mem_cgroup_try_charge_swap(folio, entry))
+ goto out_free;
- return entry;
+ /*
+ * XArray node allocations from PF_MEMALLOC contexts could
+ * completely exhaust the page allocator. __GFP_NOMEMALLOC
+ * stops emergency reserves from being allocated.
+ *
+ * TODO: this could cause a theoretical memory reclaim
+ * deadlock in the swap out path.
+ */
+ if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
+ goto out_free;
+
+ atomic_long_sub(size, &nr_swap_pages);
+ return true;
+
+out_free:
+ put_swap_folio(folio, entry);
+ return false;
}
static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fcca38bc640f..71a6b597e469 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1289,7 +1289,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
- if (!add_to_swap(folio)) {
+ if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
int __maybe_unused order = folio_order(folio);
if (!folio_test_large(folio))
@@ -1305,9 +1305,21 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
}
#endif
count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
- if (!add_to_swap(folio))
+ if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
goto activate_locked_split;
}
+ /*
+ * Normally the folio will be dirtied in unmap because its
+ * pte should be dirty. A special case is MADV_FREE page. The
+ * page's pte could have dirty bit cleared but the folio's
+ * SwapBacked flag is still set because clearing the dirty bit
+ * and SwapBacked flag has no lock protected. For such folio,
+ * unmap will not set dirty bit for it, so folio reclaim will
+ * not write the folio out. This can cause data corruption when
+ * the folio is swapped in later. Always setting the dirty flag
+ * for the folio solves the problem.
+ */
+ folio_mark_dirty(folio);
}
}
--
2.48.1

On 02/15/25 at 01:57am, Kairui Song wrote:
......snip..
> -swp_entry_t folio_alloc_swap(struct folio *folio)
> +/* Rotate the device and switch to a new cluster */
> +static bool swap_alloc_rotate(swp_entry_t *entry,
> + unsigned char usage,
> + int order)
The function name is misleading; it may make people think it's a HDD
swap allocation. I would call it swap_alloc_slow(), as the counterpart
to swap_alloc_fast().
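i.e. so the two prototypes read as a pair:

	static bool swap_alloc_fast(swp_entry_t *entry, unsigned char usage, int order);
	static bool swap_alloc_slow(swp_entry_t *entry, unsigned char usage, int order);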
> {
> - unsigned int order = folio_order(folio);
> - unsigned int size = 1 << order;
> - struct swap_info_struct *si, *next;
> - swp_entry_t entry = {};
> - unsigned long offset;
> int node;
> + unsigned long offset;
> + struct swap_info_struct *si, *next;
>
> - if (order) {
> - /*
> - * Should not even be attempting large allocations when huge
> - * page swap is disabled. Warn and fail the allocation.
> - */
> - if (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER) {
> - VM_WARN_ON_ONCE(1);
> - return entry;
> - }
> - }
> -
> - /* Fast path using percpu cluster */
> - local_lock(&percpu_swap_cluster.lock);
> - if (swap_alloc_fast(&entry, SWAP_HAS_CACHE, order))
> - goto out_alloced;
> -
> - /* Rotate the device and switch to a new cluster */
> + node = numa_node_id();
> spin_lock(&swap_avail_lock);
> start_over:
> - node = numa_node_id();
> plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
> + /* Rotate the device and switch to a new cluster */
> plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
> spin_unlock(&swap_avail_lock);
> if (get_swap_device_info(si)) {
> offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE);
> put_swap_device(si);
> if (offset) {
> - entry = swp_entry(si->type, offset);
> - goto out_alloced;
> + *entry = swp_entry(si->type, offset);
> + return true;
> }
> if (order)
> - goto out_failed;
> + return false;
> }
>
> spin_lock(&swap_avail_lock);

Hi Kairui,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Kairui-Song/mm-swap-avoid-reclaiming-irrelevant-swap-cache/20250215-020239
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250214175709.76029-8-ryncsn%40gmail.com
patch subject: [PATCH 7/7] mm, swap: simplify folio swap allocation
config: x86_64-buildonly-randconfig-001-20250215 (https://download.01.org/0day-ci/archive/20250216/202502160040.5ULBvBsP-lkp@intel.com/config)
compiler: gcc-12 (Debian 12.2.0-14) 12.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250216/202502160040.5ULBvBsP-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202502160040.5ULBvBsP-lkp@intel.com/
All error/warnings (new ones prefixed by >>):
In file included from include/linux/suspend.h:5,
from arch/x86/kernel/asm-offsets.c:14:
>> include/linux/swap.h:591:1: error: expected identifier or '(' before '{' token
591 | {
| ^
>> include/linux/swap.h:590:13: warning: 'folio_alloc_swap' declared 'static' but never defined [-Wunused-function]
590 | static bool folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
| ^~~~~~~~~~~~~~~~
make[3]: *** [scripts/Makefile.build:102: arch/x86/kernel/asm-offsets.s] Error 1 shuffle=2631350961
make[3]: Target 'prepare' not remade because of errors.
make[2]: *** [Makefile:1264: prepare0] Error 2 shuffle=2631350961
make[2]: Target 'prepare' not remade because of errors.
make[1]: *** [Makefile:251: __sub-make] Error 2 shuffle=2631350961
make[1]: Target 'prepare' not remade because of errors.
make: *** [Makefile:251: __sub-make] Error 2 shuffle=2631350961
make: Target 'prepare' not remade because of errors.
vim +591 include/linux/swap.h
8334b96221ff0d Minchan Kim 2015-09-08 589
f8d9ff0d052908 Kairui Song 2025-02-15 @590 static bool folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
^1da177e4c3f41 Linus Torvalds 2005-04-16 @591 {
f8d9ff0d052908 Kairui Song 2025-02-15 592 return false;
^1da177e4c3f41 Linus Torvalds 2005-04-16 593 }
^1da177e4c3f41 Linus Torvalds 2005-04-16 594
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
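The breakage is in the !CONFIG_SWAP stub quoted above: the new
declaration kept a stray trailing semicolon and dropped `inline`,
leaving the `{ return false; }` body orphaned. The intended stub is
presumably:

	static inline bool folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
	{
		return false;
	}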

Hi Kairui,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Kairui-Song/mm-swap-avoid-reclaiming-irrelevant-swap-cache/20250215-020239
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20250214175709.76029-8-ryncsn%40gmail.com
patch subject: [PATCH 7/7] mm, swap: simplify folio swap allocation
config: x86_64-buildonly-randconfig-002-20250215 (https://download.01.org/0day-ci/archive/20250216/202502160013.l8ZYewQK-lkp@intel.com/config)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250216/202502160013.l8ZYewQK-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202502160013.l8ZYewQK-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from arch/x86/kernel/asm-offsets.c:14:
In file included from include/linux/suspend.h:5:
In file included from include/linux/swap.h:9:
In file included from include/linux/memcontrol.h:21:
In file included from include/linux/mm.h:2208:
include/linux/vmstat.h:504:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
504 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
| ~~~~~~~~~~~~~~~~~~~~~ ^
505 | item];
| ~~~~
include/linux/vmstat.h:511:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
511 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
| ~~~~~~~~~~~~~~~~~~~~~ ^
512 | NR_VM_NUMA_EVENT_ITEMS +
| ~~~~~~~~~~~~~~~~~~~~~~
include/linux/vmstat.h:518:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
518 | return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
| ~~~~~~~~~~~ ^ ~~~
include/linux/vmstat.h:524:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
524 | return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
| ~~~~~~~~~~~~~~~~~~~~~ ^
525 | NR_VM_NUMA_EVENT_ITEMS +
| ~~~~~~~~~~~~~~~~~~~~~~
In file included from arch/x86/kernel/asm-offsets.c:14:
In file included from include/linux/suspend.h:5:
>> include/linux/swap.h:591:1: error: expected identifier or '('
591 | {
| ^
4 warnings and 1 error generated.
make[3]: *** [scripts/Makefile.build:102: arch/x86/kernel/asm-offsets.s] Error 1 shuffle=2496405435
make[3]: Target 'prepare' not remade because of errors.
make[2]: *** [Makefile:1264: prepare0] Error 2 shuffle=2496405435
make[2]: Target 'prepare' not remade because of errors.
make[1]: *** [Makefile:251: __sub-make] Error 2 shuffle=2496405435
make[1]: Target 'prepare' not remade because of errors.
make: *** [Makefile:251: __sub-make] Error 2 shuffle=2496405435
make: Target 'prepare' not remade because of errors.
vim +591 include/linux/swap.h
8334b96221ff0d Minchan Kim 2015-09-08 589
f8d9ff0d052908 Kairui Song 2025-02-15 590 static bool folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
^1da177e4c3f41 Linus Torvalds 2005-04-16 @591 {
f8d9ff0d052908 Kairui Song 2025-02-15 592 return false;
^1da177e4c3f41 Linus Torvalds 2005-04-16 593 }
^1da177e4c3f41 Linus Torvalds 2005-04-16 594
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

On Sat, Feb 15, 2025 at 01:57:09AM +0800, Kairui Song wrote:
> @@ -1648,20 +1639,20 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
> if (list_empty(&info->swaplist))
> list_add(&info->swaplist, &shmem_swaplist);
>
> - if (add_to_swap_cache(folio, swap,
> - __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
> - NULL) == 0) {
> + if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
add_to_swap_cache() returns 0 on success or -errno.
folio_alloc_swap returns true on success.
That would seem to indicate you should change the polarity of this test?
Or should folio_alloc_swap() return an errno? Is there value in
distinguishing why we couldn't alloc swap (ENOMEM vs ENOSPC, perhaps?)
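Something like this, perhaps? An entirely untested sketch, with the
percpu-cluster locking, the memcg charge and the nr_swap_pages
accounting elided, and using the swap_alloc_slow() name suggested
elsewhere in the thread:

	int folio_alloc_swap(struct folio *folio, gfp_t gfp)
	{
		swp_entry_t entry = {};
		int err;

		/* No device had a free slot of this order */
		if (!swap_alloc_fast(&entry, SWAP_HAS_CACHE, folio_order(folio)) &&
		    !swap_alloc_slow(&entry, SWAP_HAS_CACHE, folio_order(folio)))
			return -ENOSPC;

		/* XArray insertion failure is typically -ENOMEM */
		err = add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL);
		if (err) {
			put_swap_folio(folio, entry);
			return err;
		}
		return 0;
	}

Callers that only care about success would then test for == 0.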

On Sat, Feb 15, 2025 at 4:13 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Sat, Feb 15, 2025 at 01:57:09AM +0800, Kairui Song wrote:
> > @@ -1648,20 +1639,20 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
> > if (list_empty(&info->swaplist))
> > list_add(&info->swaplist, &shmem_swaplist);
> >
> > - if (add_to_swap_cache(folio, swap,
> > - __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
> > - NULL) == 0) {
> > + if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
>
> add_to_swap_cache() returns 0 on success or -errno.
>
> folio_alloc_swap returns true on success.
>
> That would seem to indicate you should change the polarity of this test?
I think I already did? It was (add_to_swap_cache(...) == 0), now it's
(folio_alloc_swap(...))
>
> Or should folio_alloc_swap() return an errno? Is there value in
> distinguishing why we couldn't alloc swap (ENOMEM vs ENOSPC, perhaps?)
>
Good idea, returning an error value might be more helpful in the
future; I will update this part.