Remove the zswap xarray tree and store zswap entries directly in the
virtual swap descriptor. Lookups and updates now synchronize on the
lock of the virtual swap cluster that owns the descriptor, so the
zswap pool is effectively re-partitioned by virtual swap cluster and
contention on the zswap tree lock is eliminated.

Storing an entry no longer allocates xarray nodes, so it cannot fail
with -ENOMEM; the xarray error handling and the store_failed unwind
path in zswap_store_page() go away as well.
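
For illustration, the new helpers follow the usual store/load/erase
lifecycle. A simplified sketch (error handling elided; the call sites
and zswap_entry_free() are as in mm/zswap.c):

    /* swapout, in zswap_store_page(): publish the compressed entry */
    old = zswap_entry_store(page_swpentry, entry);
    if (old)
        zswap_entry_free(old);  /* displaced an entry that went stale */

    /* swapin, in zswap_load(): look up the compressed entry */
    entry = zswap_entry_load(swpentry);

    /* invalidation, in zswap_invalidate(): drop the entry */
    entry = zswap_entry_erase(swpentry);
    if (entry)
        zswap_entry_free(entry);
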
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
include/linux/zswap.h | 6 +++
mm/vswap.c | 100 ++++++++++++++++++++++++++++++++++++++++++
mm/zswap.c | 40 -----------------
3 files changed, 106 insertions(+), 40 deletions(-)
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 1a04caf283dc8..7eb3ce7e124fc 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -6,6 +6,7 @@
#include <linux/mm_types.h>

struct lruvec;
+struct zswap_entry;

extern atomic_long_t zswap_stored_pages;
@@ -33,6 +34,11 @@ void zswap_lruvec_state_init(struct lruvec *lruvec);
void zswap_folio_swapin(struct folio *folio);
bool zswap_is_enabled(void);
bool zswap_never_enabled(void);
+void *zswap_entry_store(swp_entry_t swpentry, struct zswap_entry *entry);
+void *zswap_entry_load(swp_entry_t swpentry);
+void *zswap_entry_erase(swp_entry_t swpentry);
+bool zswap_empty(swp_entry_t swpentry);
+
#else

struct zswap_lruvec_state {};
diff --git a/mm/vswap.c b/mm/vswap.c
index d44199dc059a3..9bb733f00fd21 100644
--- a/mm/vswap.c
+++ b/mm/vswap.c
@@ -10,6 +10,7 @@
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include <linux/cpuhotplug.h>
+#include <linux/zswap.h>
#include "swap.h"
#include "swap_table.h"
@@ -37,11 +38,13 @@
* Swap descriptor - metadata of a swapped out page.
*
* @slot: The handle to the physical swap slot backing this page.
+ * @zswap_entry: The zswap entry for this page, if it is stored in zswap.
* @swap_cache: The folio in swap cache.
* @shadow: The shadow entry.
*/
struct swp_desc {
swp_slot_t slot;
+ struct zswap_entry *zswap_entry;
union {
struct folio *swap_cache;
void *shadow;
@@ -238,6 +241,7 @@ static void __vswap_alloc_from_cluster(struct vswap_cluster *cluster, int start)
for (i = 0; i < nr; i++) {
desc = &cluster->descriptors[start + i];
desc->slot.val = 0;
+ desc->zswap_entry = NULL;
}
cluster->count += nr;
}
@@ -1009,6 +1013,102 @@ void __swap_cache_replace_folio(struct folio *old, struct folio *new)
rcu_read_unlock();
}
+#ifdef CONFIG_ZSWAP
+/**
+ * zswap_entry_store - store a zswap entry for a swap entry
+ * @swpentry: the swap entry
+ * @entry: the zswap entry to store
+ *
+ * Stores a zswap entry in the swap descriptor for the given swap entry.
+ * The cluster is locked during the store operation.
+ *
+ * Return: the old zswap entry if one existed, or NULL if there was
+ * none or if no swap descriptor exists for @swpentry
+ */
+void *zswap_entry_store(swp_entry_t swpentry, struct zswap_entry *entry)
+{
+ struct vswap_cluster *cluster = NULL;
+ struct swp_desc *desc;
+ void *old;
+
+ rcu_read_lock();
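+ /* on success, vswap_iter() returns the descriptor with cluster->lock held */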
+ desc = vswap_iter(&cluster, swpentry.val);
+ if (!desc) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ old = desc->zswap_entry;
+ desc->zswap_entry = entry;
+ spin_unlock(&cluster->lock);
+ rcu_read_unlock();
+
+ return old;
+}
+
+/**
+ * zswap_entry_load - load a zswap entry for a swap entry
+ * @swpentry: the swap entry
+ *
+ * Loads the zswap entry from the swap descriptor for the given swap entry.
+ *
+ * Return: the zswap entry if one exists, or NULL if there is none or
+ * if no swap descriptor exists for @swpentry
+ */
+void *zswap_entry_load(swp_entry_t swpentry)
+{
+ struct vswap_cluster *cluster = NULL;
+ struct swp_desc *desc;
+ void *zswap_entry;
+
+ rcu_read_lock();
+ desc = vswap_iter(&cluster, swpentry.val);
+ if (!desc) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ zswap_entry = desc->zswap_entry;
+ spin_unlock(&cluster->lock);
+ rcu_read_unlock();
+
+ return zswap_entry;
+}
+
+/**
+ * zswap_entry_erase - erase a zswap entry for a swap entry
+ * @swpentry: the swap entry
+ *
+ * Erases the zswap entry from the swap descriptor for the given swap entry.
+ * The cluster is locked during the erase operation.
+ *
+ * Return: the zswap entry that was erased, or NULL if none existed
+ * or if no swap descriptor exists for @swpentry
+ */
+void *zswap_entry_erase(swp_entry_t swpentry)
+{
+ struct vswap_cluster *cluster = NULL;
+ struct swp_desc *desc;
+ void *old;
+
+ rcu_read_lock();
+ desc = vswap_iter(&cluster, swpentry.val);
+ if (!desc) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ old = desc->zswap_entry;
+ desc->zswap_entry = NULL;
+ spin_unlock(&cluster->lock);
+ rcu_read_unlock();
+
+ return old;
+}
+
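+/**
+ * zswap_empty - check whether any zswap entries can exist
+ * @swpentry: the swap entry (currently unused)
+ *
+ * Return: true if the virtual swap cluster map is empty, i.e. there
+ * are no swap descriptors and therefore no zswap entries at all
+ */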
+bool zswap_empty(swp_entry_t swpentry)
+{
+ return xa_empty(&vswap_cluster_map);
+}
+#endif /* CONFIG_ZSWAP */
+
int vswap_init(void)
{
int i;
diff --git a/mm/zswap.c b/mm/zswap.c
index f7313261673ff..72441131f094e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -223,37 +223,6 @@ static bool zswap_has_pool;
* helpers and fwd declarations
**********************************/
-static DEFINE_XARRAY(zswap_tree);
-
-#define zswap_tree_index(entry) (entry.val)
-
-static inline void *zswap_entry_store(swp_entry_t swpentry,
- struct zswap_entry *entry)
-{
- pgoff_t offset = zswap_tree_index(swpentry);
-
- return xa_store(&zswap_tree, offset, entry, GFP_KERNEL);
-}
-
-static inline void *zswap_entry_load(swp_entry_t swpentry)
-{
- pgoff_t offset = zswap_tree_index(swpentry);
-
- return xa_load(&zswap_tree, offset);
-}
-
-static inline void *zswap_entry_erase(swp_entry_t swpentry)
-{
- pgoff_t offset = zswap_tree_index(swpentry);
-
- return xa_erase(&zswap_tree, offset);
-}
-
-static inline bool zswap_empty(swp_entry_t swpentry)
-{
- return xa_empty(&zswap_tree);
-}
-
#define zswap_pool_debug(msg, p) \
pr_debug("%s pool %s\n", msg, (p)->tfm_name)
@@ -1445,13 +1414,6 @@ static bool zswap_store_page(struct page *page,
goto compress_failed;
old = zswap_entry_store(page_swpentry, entry);
- if (xa_is_err(old)) {
- int err = xa_err(old);
-
- WARN_ONCE(err != -ENOMEM, "unexpected xarray error: %d\n", err);
- zswap_reject_alloc_fail++;
- goto store_failed;
- }
/*
* We may have had an existing entry that became stale when
@@ -1498,8 +1460,6 @@ static bool zswap_store_page(struct page *page,
return true;
-store_failed:
- zs_free(pool->zs_pool, entry->handle);
compress_failed:
zswap_entry_cache_free(entry);
return false;
--
2.47.3