Add helper functions that abstract the zswap entry tree operations
(store, load, erase, and emptiness check) behind a common interface, so
that they can be re-implemented when swap is virtualized. No functional
change intended.
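
Callers now go through these helpers rather than mapping a swp_entry_t
to an xarray and offset by hand at each call site. For instance, a load
that used to be written as:

	tree = swap_zswap_tree(swpentry);
	entry = xa_load(tree, swp_offset(swpentry));

becomes:

	entry = zswap_entry_load(swpentry);

keeping the swp_entry_t -> tree mapping in a single place.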
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
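Note (not part of the commit message): as a rough, hypothetical sketch
of the direction, under virtual swap a helper such as zswap_entry_load()
could resolve the entry through a per-swap-slot descriptor instead of a
per-type xarray, e.g.:

	static inline void *zswap_entry_load(swp_entry_t swpentry)
	{
		/* swp_desc_lookup() is a made-up placeholder here, not
		 * an existing interface.
		 */
		return swp_desc_lookup(swpentry)->zswap_entry;
	}

Both swp_desc_lookup() and the descriptor layout above are placeholders
for illustration only.
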
mm/zswap.c | 63 ++++++++++++++++++++++++++++++++++++++++++++-------------------
1 file changed, 44 insertions(+), 19 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 315e4d0d08311..a5a3f068bd1a6 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -234,6 +234,42 @@ static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
>> ZSWAP_ADDRESS_SPACE_SHIFT];
}
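+/*
+ * Helpers wrapping the per-swp_entry_t tree (xarray) operations. Keeping
+ * them behind one interface eases re-implementation for virtual swap.
+ */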
+static inline void *zswap_entry_store(swp_entry_t swpentry,
+ struct zswap_entry *entry)
+{
+ struct xarray *tree = swap_zswap_tree(swpentry);
+ pgoff_t offset = swp_offset(swpentry);
+
+ return xa_store(tree, offset, entry, GFP_KERNEL);
+}
+
+static inline void *zswap_entry_load(swp_entry_t swpentry)
+{
+ struct xarray *tree = swap_zswap_tree(swpentry);
+ pgoff_t offset = swp_offset(swpentry);
+
+ return xa_load(tree, offset);
+}
+
+static inline void *zswap_entry_erase(swp_entry_t swpentry)
+{
+ struct xarray *tree = swap_zswap_tree(swpentry);
+ pgoff_t offset = swp_offset(swpentry);
+
+ return xa_erase(tree, offset);
+}
+
+static inline bool zswap_empty(swp_entry_t swpentry)
+{
+ struct xarray *tree = swap_zswap_tree(swpentry);
+
+ return xa_empty(tree);
+}
+
#define zswap_pool_debug(msg, p) \
pr_debug("%s pool %s\n", msg, (p)->tfm_name)
@@ -1000,8 +1032,6 @@ static bool zswap_decompress(struct zswap_entry *entry, struct folio *folio)
static int zswap_writeback_entry(struct zswap_entry *entry,
swp_entry_t swpentry)
{
- struct xarray *tree;
- pgoff_t offset = swp_offset(swpentry);
struct folio *folio;
struct mempolicy *mpol;
bool folio_was_allocated;
@@ -1040,8 +1070,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* old compressed data. Only when this is successful can the entry
* be dereferenced.
*/
- tree = swap_zswap_tree(swpentry);
- if (entry != xa_load(tree, offset)) {
+ if (entry != zswap_entry_load(swpentry)) {
ret = -ENOMEM;
goto out;
}
@@ -1051,7 +1080,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
goto out;
}
- xa_erase(tree, offset);
+ zswap_entry_erase(swpentry);
count_vm_event(ZSWPWB);
if (entry->objcg)
@@ -1427,9 +1456,7 @@ static bool zswap_store_page(struct page *page,
if (!zswap_compress(page, entry, pool))
goto compress_failed;
- old = xa_store(swap_zswap_tree(page_swpentry),
- swp_offset(page_swpentry),
- entry, GFP_KERNEL);
+ old = zswap_entry_store(page_swpentry, entry);
if (xa_is_err(old)) {
int err = xa_err(old);
@@ -1563,11 +1590,9 @@ bool zswap_store(struct folio *folio)
unsigned type = swp_type(swp);
pgoff_t offset = swp_offset(swp);
struct zswap_entry *entry;
- struct xarray *tree;
for (index = 0; index < nr_pages; ++index) {
- tree = swap_zswap_tree(swp_entry(type, offset + index));
- entry = xa_erase(tree, offset + index);
+ entry = zswap_entry_erase(swp_entry(type, offset + index));
if (entry)
zswap_entry_free(entry);
}
@@ -1599,9 +1624,7 @@ bool zswap_store(struct folio *folio)
int zswap_load(struct folio *folio)
{
swp_entry_t swp = folio->swap;
- pgoff_t offset = swp_offset(swp);
bool swapcache = folio_test_swapcache(folio);
- struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
VM_WARN_ON_ONCE(!folio_test_locked(folio));
@@ -1619,7 +1642,7 @@ int zswap_load(struct folio *folio)
return -EINVAL;
}
- entry = xa_load(tree, offset);
+ entry = zswap_entry_load(swp);
if (!entry)
return -ENOENT;
@@ -1648,7 +1671,7 @@ int zswap_load(struct folio *folio)
*/
if (swapcache) {
folio_mark_dirty(folio);
- xa_erase(tree, offset);
+ zswap_entry_erase(swp);
zswap_entry_free(entry);
}
@@ -1658,14 +1681,12 @@ int zswap_load(struct folio *folio)
void zswap_invalidate(swp_entry_t swp)
{
- pgoff_t offset = swp_offset(swp);
- struct xarray *tree = swap_zswap_tree(swp);
struct zswap_entry *entry;
- if (xa_empty(tree))
+ if (zswap_empty(swp))
return;
- entry = xa_erase(tree, offset);
+ entry = zswap_entry_erase(swp);
if (entry)
zswap_entry_free(entry);
}
--
2.47.3