mm/zswap.c | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)
From: Kairui Song <kasong@tencent.com>
Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even
for SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap
cache, including those from SWP_SYNCHRONOUS_IO devices like
zram. This means the workaround for swap cache bypassing
introduced by commit 25cd241408a2 ("mm: zswap: fix data loss on
SWP_SYNCHRONOUS_IO devices") is no longer needed. Remove it, but
keep the comments that are still helpful.
Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/zswap.c | 19 ++++++-------------
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 3d2d59ac3f9c..8cd61603ff79 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1589,11 +1589,11 @@ int zswap_load(struct folio *folio)
{
swp_entry_t swp = folio->swap;
pgoff_t offset = swp_offset(swp);
- bool swapcache = folio_test_swapcache(folio);
struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
+ VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
if (zswap_never_enabled())
return -ENOENT;
@@ -1624,22 +1624,15 @@ int zswap_load(struct folio *folio)
count_objcg_events(entry->objcg, ZSWPIN, 1);
/*
- * When reading into the swapcache, invalidate our entry. The
- * swapcache can be the authoritative owner of the page and
+ * We are reading into the swapcache, invalidate zswap entry.
+ * The swapcache is the authoritative owner of the page and
* its mappings, and the pressure that results from having two
* in-memory copies outweighs any benefits of caching the
* compression work.
- *
- * (Most swapins go through the swapcache. The notable
- * exception is the singleton fault on SWP_SYNCHRONOUS_IO
- * files, which reads into a private page and may free it if
- * the fault fails. We remain the primary owner of the entry.)
*/
- if (swapcache) {
- folio_mark_dirty(folio);
- xa_erase(tree, offset);
- zswap_entry_free(entry);
- }
+ folio_mark_dirty(folio);
+ xa_erase(tree, offset);
+ zswap_entry_free(entry);
folio_unlock(folio);
return 0;
---
base-commit: 2c263046cbe6d9d5fce3dfeba063f199f7e6298f
change-id: 20251226-zswap-syncio-cleanup-a05b7fc6180f
Best regards,
--
Kairui Song <kasong@tencent.com>
On 2026/2/2 01:47, Kairui Song wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even
> for SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap
> cache, including those from SWP_SYNCHRONOUS_IO devices like
> zram. Which means the workaround for swap cache bypassing
> introduced by commit 25cd241408a2 ("mm: zswap: fix data loss on
> SWP_SYNCHRONOUS_IO devices") is no longer needed. Remove it, but
> keep the comments that are still helpful.
>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Thanks!
> ---
> mm/zswap.c | 19 ++++++-------------
> 1 file changed, 6 insertions(+), 13 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 3d2d59ac3f9c..8cd61603ff79 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -1589,11 +1589,11 @@ int zswap_load(struct folio *folio)
> {
> swp_entry_t swp = folio->swap;
> pgoff_t offset = swp_offset(swp);
> - bool swapcache = folio_test_swapcache(folio);
> struct xarray *tree = swap_zswap_tree(swp);
> struct zswap_entry *entry;
>
> VM_WARN_ON_ONCE(!folio_test_locked(folio));
> + VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
>
> if (zswap_never_enabled())
> return -ENOENT;
> @@ -1624,22 +1624,15 @@ int zswap_load(struct folio *folio)
> count_objcg_events(entry->objcg, ZSWPIN, 1);
>
> /*
> - * When reading into the swapcache, invalidate our entry. The
> - * swapcache can be the authoritative owner of the page and
> + * We are reading into the swapcache, invalidate zswap entry.
> + * The swapcache is the authoritative owner of the page and
> * its mappings, and the pressure that results from having two
> * in-memory copies outweighs any benefits of caching the
> * compression work.
> - *
> - * (Most swapins go through the swapcache. The notable
> - * exception is the singleton fault on SWP_SYNCHRONOUS_IO
> - * files, which reads into a private page and may free it if
> - * the fault fails. We remain the primary owner of the entry.)
> */
> - if (swapcache) {
> - folio_mark_dirty(folio);
> - xa_erase(tree, offset);
> - zswap_entry_free(entry);
> - }
> + folio_mark_dirty(folio);
> + xa_erase(tree, offset);
> + zswap_entry_free(entry);
>
> folio_unlock(folio);
> return 0;
>
> ---
> base-commit: 2c263046cbe6d9d5fce3dfeba063f199f7e6298f
> change-id: 20251226-zswap-syncio-cleanup-a05b7fc6180f
>
> Best regards,
On Sun, Feb 1, 2026 at 9:47 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even
> for SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap
> cache, including those from SWP_SYNCHRONOUS_IO devices like
> zram. Which means the workaround for swap cache bypassing
> introduced by commit 25cd241408a2 ("mm: zswap: fix data loss on
> SWP_SYNCHRONOUS_IO devices") is no longer needed. Remove it, but
> keep the comments that are still helpful.
>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Kairui Song <kasong@tencent.com>
Awesome! Thanks for removing that weird case.
Acked-by: Nhat Pham <nphamcs@gmail.com>
On Mon, Feb 02, 2026 at 01:47:32AM +0800, Kairui Song wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even
> for SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap
> cache, including those from SWP_SYNCHRONOUS_IO devices like
> zram. Which means the workaround for swap cache bypassing
> introduced by commit 25cd241408a2 ("mm: zswap: fix data loss on
> SWP_SYNCHRONOUS_IO devices") is no longer needed. Remove it, but
> keep the comments that are still helpful.
>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Thanks!
On Sun, Feb 1, 2026 at 9:47 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even
> for SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap
> cache, including those from SWP_SYNCHRONOUS_IO devices like
> zram. Which means the workaround for swap cache bypassing
> introduced by commit 25cd241408a2 ("mm: zswap: fix data loss on
> SWP_SYNCHRONOUS_IO devices") is no longer needed. Remove it, but
> keep the comments that are still helpful.
Acked-by: Chris Li <chrisl@kernel.org>
Chris
>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
> mm/zswap.c | 19 ++++++-------------
> 1 file changed, 6 insertions(+), 13 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 3d2d59ac3f9c..8cd61603ff79 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -1589,11 +1589,11 @@ int zswap_load(struct folio *folio)
> {
> swp_entry_t swp = folio->swap;
> pgoff_t offset = swp_offset(swp);
> - bool swapcache = folio_test_swapcache(folio);
> struct xarray *tree = swap_zswap_tree(swp);
> struct zswap_entry *entry;
>
> VM_WARN_ON_ONCE(!folio_test_locked(folio));
> + VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
>
> if (zswap_never_enabled())
> return -ENOENT;
> @@ -1624,22 +1624,15 @@ int zswap_load(struct folio *folio)
> count_objcg_events(entry->objcg, ZSWPIN, 1);
>
> /*
> - * When reading into the swapcache, invalidate our entry. The
> - * swapcache can be the authoritative owner of the page and
> + * We are reading into the swapcache, invalidate zswap entry.
> + * The swapcache is the authoritative owner of the page and
> * its mappings, and the pressure that results from having two
> * in-memory copies outweighs any benefits of caching the
> * compression work.
> - *
> - * (Most swapins go through the swapcache. The notable
> - * exception is the singleton fault on SWP_SYNCHRONOUS_IO
> - * files, which reads into a private page and may free it if
> - * the fault fails. We remain the primary owner of the entry.)
> */
> - if (swapcache) {
> - folio_mark_dirty(folio);
> - xa_erase(tree, offset);
> - zswap_entry_free(entry);
> - }
> + folio_mark_dirty(folio);
> + xa_erase(tree, offset);
> + zswap_entry_free(entry);
>
> folio_unlock(folio);
> return 0;
>
> ---
> base-commit: 2c263046cbe6d9d5fce3dfeba063f199f7e6298f
> change-id: 20251226-zswap-syncio-cleanup-a05b7fc6180f
>
> Best regards,
> --
> Kairui Song <kasong@tencent.com>
>
On Mon, Feb 2, 2026 at 1:47 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> Since commit f1879e8a0c60 ("mm, swap: never bypass the swap cache even
> for SWP_SYNCHRONOUS_IO"), all swap-in operations go through the swap
> cache, including those from SWP_SYNCHRONOUS_IO devices like
> zram. Which means the workaround for swap cache bypassing
> introduced by commit 25cd241408a2 ("mm: zswap: fix data loss on
> SWP_SYNCHRONOUS_IO devices") is no longer needed. Remove it, but
> keep the comments that are still helpful.
>
> Suggested-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> Signed-off-by: Kairui Song <kasong@tencent.com>
LGTM, thanks!
Reviewed-by: Barry Song <baohua@kernel.org>
© 2016 - 2026 Red Hat, Inc.