Once we decouple a swap entry from its backing store via the virtual
swap layer, we can no longer statically allocate a per-swapfile array
to store the swap entries' cgroup information. Move it into the swap
descriptor instead.
Note that the memory overhead for swap cgroup information is now
incurred on demand, i.e., dynamically, when a virtual swap cluster is
allocated. This helps reduce the memory overhead of a huge but
sparsely used swap space.
For instance, a 2 TB swapfile consists of 536870912 swap slots of 4 KB
each, with each slot incurring 2 bytes of overhead for swap cgroup
tracking, for a total of 1 GB. If we only utilize 10% of the swapfile,
we save roughly 900 MB.
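For concreteness, the arithmetic can be checked with a small userspace
sketch (illustrative only, not part of this patch; it assumes 4 KB swap
slots and the 2-byte per-slot cost of the old static swap_cgroup map):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long swap_bytes = 2ULL << 40;  /* 2 TB swapfile */
            unsigned long long slots = swap_bytes >> 12; /* 4 KB per slot */
            unsigned long long map_bytes = slots * 2;    /* 2 bytes per slot */

            /* Prints: slots=536870912 map=1024 MB */
            printf("slots=%llu map=%llu MB\n", slots, map_bytes >> 20);

            /* At 10% utilization, allocating the cgroup information on
             * demand keeps roughly 102 MB resident, saving roughly 922 MB. */
            return 0;
    }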
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
include/linux/swap_cgroup.h | 13 ---
mm/Makefile | 3 -
mm/swap_cgroup.c | 174 ------------------------------------
mm/swapfile.c | 7 --
mm/vswap.c | 95 ++++++++++++++++++++
5 files changed, 95 insertions(+), 197 deletions(-)
delete mode 100644 mm/swap_cgroup.c
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index 91cdf12190a03..a2abb4d6fa085 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -9,8 +9,6 @@
extern void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent);
extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
-extern int swap_cgroup_swapon(int type, unsigned long max_pages);
-extern void swap_cgroup_swapoff(int type);
#else
@@ -31,17 +29,6 @@ unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
return 0;
}
-static inline int
-swap_cgroup_swapon(int type, unsigned long max_pages)
-{
- return 0;
-}
-
-static inline void swap_cgroup_swapoff(int type)
-{
- return;
-}
-
#endif
#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/mm/Makefile b/mm/Makefile
index 67fa4586e7e18..a7538784191bf 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -103,9 +103,6 @@ obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
obj-$(CONFIG_LIVEUPDATE) += memfd_luo.o
obj-$(CONFIG_MEMCG_V1) += memcontrol-v1.o
obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
-ifdef CONFIG_SWAP
-obj-$(CONFIG_MEMCG) += swap_cgroup.o
-endif
obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
obj-$(CONFIG_GUP_TEST) += gup_test.o
obj-$(CONFIG_DMAPOOL_TEST) += dmapool_test.o
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
deleted file mode 100644
index 77ce1d66c318d..0000000000000
--- a/mm/swap_cgroup.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/swap_cgroup.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-
-#include <linux/swapops.h> /* depends on mm.h include */
-
-static DEFINE_MUTEX(swap_cgroup_mutex);
-
-/* Pack two cgroup id (short) of two entries in one swap_cgroup (atomic_t) */
-#define ID_PER_SC (sizeof(struct swap_cgroup) / sizeof(unsigned short))
-#define ID_SHIFT (BITS_PER_TYPE(unsigned short))
-#define ID_MASK (BIT(ID_SHIFT) - 1)
-struct swap_cgroup {
- atomic_t ids;
-};
-
-struct swap_cgroup_ctrl {
- struct swap_cgroup *map;
-};
-
-static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
-
-static unsigned short __swap_cgroup_id_lookup(struct swap_cgroup *map,
- pgoff_t offset)
-{
- unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
- unsigned int old_ids = atomic_read(&map[offset / ID_PER_SC].ids);
-
- BUILD_BUG_ON(!is_power_of_2(ID_PER_SC));
- BUILD_BUG_ON(sizeof(struct swap_cgroup) != sizeof(atomic_t));
-
- return (old_ids >> shift) & ID_MASK;
-}
-
-static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map,
- pgoff_t offset,
- unsigned short new_id)
-{
- unsigned short old_id;
- struct swap_cgroup *sc = &map[offset / ID_PER_SC];
- unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
- unsigned int new_ids, old_ids = atomic_read(&sc->ids);
-
- do {
- old_id = (old_ids >> shift) & ID_MASK;
- new_ids = (old_ids & ~(ID_MASK << shift));
- new_ids |= ((unsigned int)new_id) << shift;
- } while (!atomic_try_cmpxchg(&sc->ids, &old_ids, new_ids));
-
- return old_id;
-}
-
-/**
- * swap_cgroup_record - record mem_cgroup for a set of swap entries.
- * These entries must belong to one single folio, and that folio
- * must be being charged for swap space (swap out), and these
- * entries must not have been charged
- *
- * @folio: the folio that the swap entry belongs to
- * @id: mem_cgroup ID to be recorded
- * @ent: the first swap entry to be recorded
- */
-void swap_cgroup_record(struct folio *folio, unsigned short id,
- swp_entry_t ent)
-{
- unsigned int nr_ents = folio_nr_pages(folio);
- swp_slot_t slot = swp_entry_to_swp_slot(ent);
- struct swap_cgroup *map;
- pgoff_t offset, end;
- unsigned short old;
-
- offset = swp_slot_offset(slot);
- end = offset + nr_ents;
- map = swap_cgroup_ctrl[swp_slot_type(slot)].map;
-
- do {
- old = __swap_cgroup_id_xchg(map, offset, id);
- VM_BUG_ON(old);
- } while (++offset != end);
-}
-
-/**
- * swap_cgroup_clear - clear mem_cgroup for a set of swap entries.
- * These entries must be being uncharged from swap. They either
- * belongs to one single folio in the swap cache (swap in for
- * cgroup v1), or no longer have any users (slot freeing).
- *
- * @ent: the first swap entry to be recorded into
- * @nr_ents: number of swap entries to be recorded
- *
- * Returns the existing old value.
- */
-unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
-{
- swp_slot_t slot = swp_entry_to_swp_slot(ent);
- pgoff_t offset = swp_slot_offset(slot);
- pgoff_t end = offset + nr_ents;
- struct swap_cgroup *map;
- unsigned short old, iter = 0;
-
- map = swap_cgroup_ctrl[swp_slot_type(slot)].map;
-
- do {
- old = __swap_cgroup_id_xchg(map, offset, 0);
- if (!iter)
- iter = old;
- VM_BUG_ON(iter != old);
- } while (++offset != end);
-
- return old;
-}
-
-/**
- * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
- * @ent: swap entry to be looked up.
- *
- * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
- */
-unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
-{
- struct swap_cgroup_ctrl *ctrl;
- swp_slot_t slot = swp_entry_to_swp_slot(ent);
-
- if (mem_cgroup_disabled())
- return 0;
-
- ctrl = &swap_cgroup_ctrl[swp_slot_type(slot)];
- return __swap_cgroup_id_lookup(ctrl->map, swp_slot_offset(slot));
-}
-
-int swap_cgroup_swapon(int type, unsigned long max_pages)
-{
- struct swap_cgroup *map;
- struct swap_cgroup_ctrl *ctrl;
-
- if (mem_cgroup_disabled())
- return 0;
-
- BUILD_BUG_ON(sizeof(unsigned short) * ID_PER_SC !=
- sizeof(struct swap_cgroup));
- map = vzalloc(DIV_ROUND_UP(max_pages, ID_PER_SC) *
- sizeof(struct swap_cgroup));
- if (!map)
- goto nomem;
-
- ctrl = &swap_cgroup_ctrl[type];
- mutex_lock(&swap_cgroup_mutex);
- ctrl->map = map;
- mutex_unlock(&swap_cgroup_mutex);
-
- return 0;
-nomem:
- pr_info("couldn't allocate enough memory for swap_cgroup\n");
- pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
- return -ENOMEM;
-}
-
-void swap_cgroup_swapoff(int type)
-{
- struct swap_cgroup *map;
- struct swap_cgroup_ctrl *ctrl;
-
- if (mem_cgroup_disabled())
- return;
-
- mutex_lock(&swap_cgroup_mutex);
- ctrl = &swap_cgroup_ctrl[type];
- map = ctrl->map;
- ctrl->map = NULL;
- mutex_unlock(&swap_cgroup_mutex);
-
- vfree(map);
-}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 68ec5d9f05848..345877786e432 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2931,8 +2931,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
vfree(swap_map);
kvfree(zeromap);
free_cluster_info(cluster_info, maxpages);
- /* Destroy swap account information */
- swap_cgroup_swapoff(p->type);
inode = mapping->host;
@@ -3497,10 +3495,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap_unlock_inode;
}
- error = swap_cgroup_swapon(si->type, maxpages);
- if (error)
- goto bad_swap_unlock_inode;
-
error = setup_swap_map(si, swap_header, swap_map, maxpages);
if (error)
goto bad_swap_unlock_inode;
@@ -3605,7 +3599,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
si->global_cluster = NULL;
inode = NULL;
destroy_swap_extents(si);
- swap_cgroup_swapoff(si->type);
spin_lock(&swap_lock);
si->swap_file = NULL;
si->flags = 0;
diff --git a/mm/vswap.c b/mm/vswap.c
index 9bb733f00fd21..64747493ca9f7 100644
--- a/mm/vswap.c
+++ b/mm/vswap.c
@@ -41,6 +41,7 @@
* @zswap_entry: The zswap entry associated with this swap slot.
* @swap_cache: The folio in swap cache.
* @shadow: The shadow entry.
+ * @memcgid: ID of the owning mem_cgroup, if any.
*/
struct swp_desc {
swp_slot_t slot;
@@ -49,6 +50,9 @@ struct swp_desc {
struct folio *swap_cache;
void *shadow;
};
+#ifdef CONFIG_MEMCG
+ unsigned short memcgid;
+#endif
};
#define VSWAP_CLUSTER_SHIFT HPAGE_PMD_ORDER
@@ -242,6 +246,9 @@ static void __vswap_alloc_from_cluster(struct vswap_cluster *cluster, int start)
desc = &cluster->descriptors[start + i];
desc->slot.val = 0;
desc->zswap_entry = NULL;
+#ifdef CONFIG_MEMCG
+ desc->memcgid = 0;
+#endif
}
cluster->count += nr;
}
@@ -1109,6 +1116,94 @@ bool zswap_empty(swp_entry_t swpentry)
}
#endif /* CONFIG_ZSWAP */
+#ifdef CONFIG_MEMCG
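+/*
+ * Record memcgid on nr_ents consecutive virtual swap descriptors,
+ * starting at entry. Returns the previously recorded ID, which is
+ * expected to be identical across the whole range.
+ */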
+static unsigned short vswap_cgroup_record(swp_entry_t entry,
+ unsigned short memcgid, unsigned int nr_ents)
+{
+ struct vswap_cluster *cluster = NULL;
+ struct swp_desc *desc;
+ unsigned short oldid = 0, iter = 0;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < nr_ents; i++) {
+ desc = vswap_iter(&cluster, entry.val + i);
+ VM_WARN_ON(!desc);
+ oldid = desc->memcgid;
+ desc->memcgid = memcgid;
+ if (!iter)
+ iter = oldid;
+ VM_WARN_ON(iter != oldid);
+ }
+ spin_unlock(&cluster->lock);
+ rcu_read_unlock();
+
+ return oldid;
+}
+
+/**
+ * swap_cgroup_record - record mem_cgroup for a set of swap entries.
+ * The entries must belong to one single folio, and that folio must
+ * be in the process of being charged for swap space (swap out); the
+ * entries must not have been charged yet.
+ *
+ * @folio: the folio that the swap entry belongs to
+ * @memcgid: mem_cgroup ID to be recorded
+ * @entry: the first swap entry to be recorded
+ */
+void swap_cgroup_record(struct folio *folio, unsigned short memcgid,
+ swp_entry_t entry)
+{
+ unsigned short oldid =
+ vswap_cgroup_record(entry, memcgid, folio_nr_pages(folio));
+
+ VM_WARN_ON(oldid);
+}
+
+/**
+ * swap_cgroup_clear - clear mem_cgroup for a set of swap entries.
+ * The entries are in the process of being uncharged from swap. They
+ * either belong to one single folio in the swap cache (swap in for
+ * cgroup v1), or no longer have any users (slot freeing).
+ *
+ * @entry: the first swap entry to be cleared
+ * @nr_ents: number of swap entries to be cleared
+ *
+ * Returns the previously recorded mem_cgroup ID.
+ */
+unsigned short swap_cgroup_clear(swp_entry_t entry, unsigned int nr_ents)
+{
+ return vswap_cgroup_record(entry, 0, nr_ents);
+}
+
+/**
+ * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
+ * @entry: swap entry to be looked up.
+ *
+ * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
+ */
+unsigned short lookup_swap_cgroup_id(swp_entry_t entry)
+{
+ struct vswap_cluster *cluster = NULL;
+ struct swp_desc *desc;
+ unsigned short ret;
+
+ /*
+ * Note that the virtual swap slot can be freed under us, for instance
+ * during the invocation of mem_cgroup_swapin_charge_folio. We need to
+ * wrap the entire lookup in an RCU read-side critical section and
+ * double-check the existence of the swap descriptor.
+ */
+ rcu_read_lock();
+ desc = vswap_iter(&cluster, entry.val);
+ ret = desc ? desc->memcgid : 0;
+ if (cluster)
+ spin_unlock(&cluster->lock);
+ rcu_read_unlock();
+ return ret;
+}
+#endif /* CONFIG_MEMCG */
+
int vswap_init(void)
{
int i;
--
2.47.3