[PATCH v3 18/20] memcg: swap: only charge physical swap slots

Posted by Nhat Pham 2 days, 8 hours ago
Now that zswap and the zero-filled swap page optimization no longer take
up any physical swap space, we should not charge against the memcg's swap
usage and limits in these cases. Instead, only record the memcg id when
the virtual swap slot is allocated, and defer the physical swap charge
(i.e. towards memory.swap.current) until the virtual swap slot is backed
by an actual physical swap slot (on zswap store failure fallback or
zswap writeback).

Signed-off-by: Nhat Pham <nphamcs@gmail.com>
---
 include/linux/swap.h | 16 +++++++++
 mm/memcontrol-v1.c   |  6 ++++
 mm/memcontrol.c      | 83 ++++++++++++++++++++++++++++++++------------
 mm/vswap.c           | 39 +++++++++------------
 4 files changed, 98 insertions(+), 46 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 9cd45eab313f8..a30d382fb5ee1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -613,6 +613,22 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
 #endif
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
+void __mem_cgroup_record_swap(struct folio *folio, swp_entry_t entry);
+static inline void mem_cgroup_record_swap(struct folio *folio,
+		swp_entry_t entry)
+{
+	if (!mem_cgroup_disabled())
+		__mem_cgroup_record_swap(folio, entry);
+}
+
+void __mem_cgroup_clear_swap(swp_entry_t entry, unsigned int nr_pages);
+static inline void mem_cgroup_clear_swap(swp_entry_t entry,
+		unsigned int nr_pages)
+{
+	if (!mem_cgroup_disabled())
+		__mem_cgroup_clear_swap(entry, nr_pages);
+}
+
 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
 static inline int mem_cgroup_try_charge_swap(struct folio *folio,
 		swp_entry_t entry)
diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index 6eed14bff7426..4580a034dcf72 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -680,6 +680,12 @@ void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
 		 * memory+swap charge, drop the swap entry duplicate.
 		 */
 		mem_cgroup_uncharge_swap(entry, nr_pages);
+
+		/*
+		 * Clear the cgroup association now to prevent double memsw
+		 * uncharging when the backends are released later.
+		 */
+		mem_cgroup_clear_swap(entry, nr_pages);
 	}
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2ba5811e7edba..50be8066bebec 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5172,6 +5172,49 @@ int __init mem_cgroup_init(void)
 }
 
 #ifdef CONFIG_SWAP
+/**
+ * __mem_cgroup_record_swap - record the folio's cgroup for the swap entries.
+ * @folio: folio being swapped out.
+ * @entry: the first swap entry in the range.
+ */
+void __mem_cgroup_record_swap(struct folio *folio, swp_entry_t entry)
+{
+	unsigned int nr_pages = folio_nr_pages(folio);
+	struct mem_cgroup *memcg;
+
+	/* Recording will be done by memcg1_swapout(). */
+	if (do_memsw_account())
+		return;
+
+	memcg = folio_memcg(folio);
+
+	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
+	if (!memcg)
+		return;
+
+	memcg = mem_cgroup_id_get_online(memcg);
+	if (nr_pages > 1)
+		mem_cgroup_id_get_many(memcg, nr_pages - 1);
+	swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
+}
+
+/**
+ * __mem_cgroup_clear_swap - clear cgroup information of the swap entries.
+ * @entry: the first swap entry in the range.
+ * @nr_pages: number of swap entries in the range.
+ */
+void __mem_cgroup_clear_swap(swp_entry_t entry, unsigned int nr_pages)
+{
+	unsigned short id = swap_cgroup_clear(entry, nr_pages);
+	struct mem_cgroup *memcg;
+
+	rcu_read_lock();
+	memcg = mem_cgroup_from_id(id);
+	if (memcg)
+		mem_cgroup_id_put_many(memcg, nr_pages);
+	rcu_read_unlock();
+}
+
 /**
  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
  * @folio: folio being added to swap
@@ -5190,34 +5233,24 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
 	if (do_memsw_account())
 		return 0;
 
-	memcg = folio_memcg(folio);
-
-	VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
-	if (!memcg)
-		return 0;
-
-	if (!entry.val) {
-		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
-		return 0;
-	}
-
-	memcg = mem_cgroup_id_get_online(memcg);
+	/*
+	 * We already record the cgroup on virtual swap allocation.
+	 * Note that the virtual swap slot holds a reference to memcg,
+	 * so this lookup should be safe.
+	 */
+	rcu_read_lock();
+	memcg = mem_cgroup_from_id(lookup_swap_cgroup_id(entry));
+	rcu_read_unlock();
 
 	if (!mem_cgroup_is_root(memcg) &&
 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
-		mem_cgroup_id_put(memcg);
 		return -ENOMEM;
 	}
 
-	/* Get references for the tail pages, too */
-	if (nr_pages > 1)
-		mem_cgroup_id_get_many(memcg, nr_pages - 1);
 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
 
-	swap_cgroup_record(folio, mem_cgroup_id(memcg), entry);
-
 	return 0;
 }
 
@@ -5231,7 +5264,8 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
 	struct mem_cgroup *memcg;
 	unsigned short id;
 
-	id = swap_cgroup_clear(entry, nr_pages);
+	id = lookup_swap_cgroup_id(entry);
+
 	rcu_read_lock();
 	memcg = mem_cgroup_from_id(id);
 	if (memcg) {
@@ -5242,7 +5276,6 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
 				page_counter_uncharge(&memcg->swap, nr_pages);
 		}
 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
-		mem_cgroup_id_put_many(memcg, nr_pages);
 	}
 	rcu_read_unlock();
 }
@@ -5251,14 +5284,18 @@ static bool mem_cgroup_may_zswap(struct mem_cgroup *original_memcg);
 
 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 {
-	long nr_swap_pages, nr_zswap_pages = 0;
+	long nr_swap_pages;
 
 	if (zswap_is_enabled() && (mem_cgroup_disabled() || do_memsw_account() ||
 				mem_cgroup_may_zswap(memcg))) {
-		nr_zswap_pages = PAGE_COUNTER_MAX;
+		/*
+		 * No need to check swap cgroup limits, since zswap is not charged
+		 * towards swap consumption.
+		 */
+		return PAGE_COUNTER_MAX;
 	}
 
-	nr_swap_pages = max_t(long, nr_zswap_pages, get_nr_swap_pages());
+	nr_swap_pages = get_nr_swap_pages();
 	if (mem_cgroup_disabled() || do_memsw_account())
 		return nr_swap_pages;
 	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
diff --git a/mm/vswap.c b/mm/vswap.c
index 7563107eb8eee..2a071d5ae173c 100644
--- a/mm/vswap.c
+++ b/mm/vswap.c
@@ -543,6 +543,7 @@ void vswap_rmap_set(struct swap_cluster_info *ci, swp_slot_t slot,
 	struct vswap_cluster *cluster = NULL;
 	struct swp_desc *desc;
 	unsigned long flush_nr, phys_swap_start = 0, phys_swap_end = 0;
+	unsigned long phys_swap_released = 0;
 	unsigned int phys_swap_type = 0;
 	bool need_flushing_phys_swap = false;
 	swp_slot_t flush_slot;
@@ -572,6 +573,7 @@ void vswap_rmap_set(struct swap_cluster_info *ci, swp_slot_t slot,
 		if (desc->type == VSWAP_ZSWAP && desc->zswap_entry) {
 			zswap_entry_free(desc->zswap_entry);
 		} else if (desc->type == VSWAP_SWAPFILE) {
+			phys_swap_released++;
 			if (!phys_swap_start) {
 				/* start a new contiguous range of phys swap */
 				phys_swap_start = swp_slot_offset(desc->slot);
@@ -602,6 +604,9 @@ void vswap_rmap_set(struct swap_cluster_info *ci, swp_slot_t slot,
 		flush_nr = phys_swap_end - phys_swap_start;
 		swap_slot_free_nr(flush_slot, flush_nr);
 	}
+
+	if (phys_swap_released)
+		mem_cgroup_uncharge_swap(entry, phys_swap_released);
  }
 
 /*
@@ -629,7 +634,7 @@ static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
 	spin_unlock(&cluster->lock);
 
 	release_backing(entry, 1);
-	mem_cgroup_uncharge_swap(entry, 1);
+	mem_cgroup_clear_swap(entry, 1);
 
 	/* erase forward mapping and release the virtual slot for reallocation */
 	spin_lock(&cluster->lock);
@@ -644,9 +649,6 @@ static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
  */
 int folio_alloc_swap(struct folio *folio)
 {
-	struct vswap_cluster *cluster = NULL;
-	int i, nr = folio_nr_pages(folio);
-	struct swp_desc *desc;
 	swp_entry_t entry;
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -656,25 +658,7 @@ int folio_alloc_swap(struct folio *folio)
 	if (!entry.val)
 		return -ENOMEM;
 
-	/*
-	 * XXX: for now, we charge towards the memory cgroup's swap limit on virtual
-	 * swap slots allocation. This will be changed soon - we will only charge on
-	 * physical swap slots allocation.
-	 */
-	if (mem_cgroup_try_charge_swap(folio, entry)) {
-		rcu_read_lock();
-		for (i = 0; i < nr; i++) {
-			desc = vswap_iter(&cluster, entry.val + i);
-			VM_WARN_ON(!desc);
-			vswap_free(cluster, desc, (swp_entry_t){ entry.val + i });
-		}
-		spin_unlock(&cluster->lock);
-		rcu_read_unlock();
-		atomic_add(nr, &vswap_alloc_reject);
-		entry.val = 0;
-		return -ENOMEM;
-	}
-
+	mem_cgroup_record_swap(folio, entry);
 	swap_cache_add_folio(folio, entry, NULL);
 
 	return 0;
@@ -716,6 +700,15 @@ bool vswap_alloc_swap_slot(struct folio *folio)
 	if (!slot.val)
 		return false;
 
+	if (mem_cgroup_try_charge_swap(folio, entry)) {
+		/*
+		 * We have not updated the backing type of the virtual swap slot.
+		 * Simply free up the physical swap slots here!
+		 */
+		swap_slot_free_nr(slot, nr);
+		return false;
+	}
+
 	/* establish the vrtual <-> physical swap slots linkages. */
 	si = __swap_slot_to_info(slot);
 	ci = swap_cluster_lock(si, swp_slot_offset(slot));
-- 
2.47.3
Re: [PATCH v3 18/20] memcg: swap: only charge physical swap slots
Posted by kernel test robot 2 days, 4 hours ago
Hi Nhat,

kernel test robot noticed the following build errors:

[auto build test ERROR on linus/master]
[also build test ERROR on v6.19]
[cannot apply to akpm-mm/mm-everything tj-cgroup/for-next tip/smp/core next-20260205]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Nhat-Pham/swap-rearrange-the-swap-header-file/20260209-065842
base:   linus/master
patch link:    https://lore.kernel.org/r/20260208215839.87595-19-nphamcs%40gmail.com
patch subject: [PATCH v3 18/20] memcg: swap: only charge physical swap slots
config: sparc64-defconfig (https://download.01.org/0day-ci/archive/20260209/202602091006.0jXoavPW-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260209/202602091006.0jXoavPW-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602091006.0jXoavPW-lkp@intel.com/

All errors (new ones prefixed by >>):

>> mm/vswap.c:637:2: error: call to undeclared function 'mem_cgroup_clear_swap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     637 |         mem_cgroup_clear_swap(entry, 1);
         |         ^
   mm/vswap.c:637:2: note: did you mean 'mem_cgroup_uncharge_swap'?
   include/linux/swap.h:658:20: note: 'mem_cgroup_uncharge_swap' declared here
     658 | static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
         |                    ^
>> mm/vswap.c:661:2: error: call to undeclared function 'mem_cgroup_record_swap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     661 |         mem_cgroup_record_swap(folio, entry);
         |         ^
   mm/vswap.c:661:2: note: did you mean 'mem_cgroup_uncharge_swap'?
   include/linux/swap.h:658:20: note: 'mem_cgroup_uncharge_swap' declared here
     658 | static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
         |                    ^
   2 errors generated.


vim +/mem_cgroup_clear_swap +637 mm/vswap.c

   528	
   529	/*
   530	 * Caller needs to handle races with other operations themselves.
   531	 *
   532	 * Specifically, this function is safe to be called in contexts where the swap
   533	 * entry has been added to the swap cache and the associated folio is locked.
   534	 * We cannot race with other accessors, and the swap entry is guaranteed to be
   535	 * valid the whole time (since swap cache implies one refcount).
   536	 *
   537	 * We cannot assume that the backends will be of the same type,
   538	 * contiguous, etc. We might have a large folio coalesced from subpages with
   539	 * mixed backend, which is only rectified when it is reclaimed.
   540	 */
   541	 static void release_backing(swp_entry_t entry, int nr)
   542	{
   543		struct vswap_cluster *cluster = NULL;
   544		struct swp_desc *desc;
   545		unsigned long flush_nr, phys_swap_start = 0, phys_swap_end = 0;
   546		unsigned long phys_swap_released = 0;
   547		unsigned int phys_swap_type = 0;
   548		bool need_flushing_phys_swap = false;
   549		swp_slot_t flush_slot;
   550		int i;
   551	
   552		VM_WARN_ON(!entry.val);
   553	
   554		rcu_read_lock();
   555		for (i = 0; i < nr; i++) {
   556			desc = vswap_iter(&cluster, entry.val + i);
   557			VM_WARN_ON(!desc);
   558	
   559			/*
   560			 * We batch contiguous physical swap slots for more efficient
   561			 * freeing.
   562			 */
   563			if (phys_swap_start != phys_swap_end &&
   564					(desc->type != VSWAP_SWAPFILE ||
   565						swp_slot_type(desc->slot) != phys_swap_type ||
   566						swp_slot_offset(desc->slot) != phys_swap_end)) {
   567				need_flushing_phys_swap = true;
   568				flush_slot = swp_slot(phys_swap_type, phys_swap_start);
   569				flush_nr = phys_swap_end - phys_swap_start;
   570				phys_swap_start = phys_swap_end = 0;
   571			}
   572	
   573			if (desc->type == VSWAP_ZSWAP && desc->zswap_entry) {
   574				zswap_entry_free(desc->zswap_entry);
   575			} else if (desc->type == VSWAP_SWAPFILE) {
   576				phys_swap_released++;
   577				if (!phys_swap_start) {
   578					/* start a new contiguous range of phys swap */
   579					phys_swap_start = swp_slot_offset(desc->slot);
   580					phys_swap_end = phys_swap_start + 1;
   581					phys_swap_type = swp_slot_type(desc->slot);
   582				} else {
   583					/* extend the current contiguous range of phys swap */
   584					phys_swap_end++;
   585				}
   586			}
   587	
   588			desc->slot.val = 0;
   589	
   590			if (need_flushing_phys_swap) {
   591				spin_unlock(&cluster->lock);
   592				cluster = NULL;
   593				swap_slot_free_nr(flush_slot, flush_nr);
   594				need_flushing_phys_swap = false;
   595			}
   596		}
   597		if (cluster)
   598			spin_unlock(&cluster->lock);
   599		rcu_read_unlock();
   600	
   601		/* Flush any remaining physical swap range */
   602		if (phys_swap_start) {
   603			flush_slot = swp_slot(phys_swap_type, phys_swap_start);
   604			flush_nr = phys_swap_end - phys_swap_start;
   605			swap_slot_free_nr(flush_slot, flush_nr);
   606		}
   607	
   608		if (phys_swap_released)
   609			mem_cgroup_uncharge_swap(entry, phys_swap_released);
   610	 }
   611	
   612	/*
   613	 * Entered with the cluster locked, but might unlock the cluster.
   614	 * This is because several operations, such as releasing physical swap slots
   615	 * (i.e swap_slot_free_nr()) require the cluster to be unlocked to avoid
   616	 * deadlocks.
   617	 *
   618	 * This is safe, because:
   619	 *
   620	 * 1. The swap entry to be freed has refcnt (swap count and swapcache pin)
   621	 *    down to 0, so no one can change its internal state
   622	 *
   623	 * 2. The swap entry to be freed still holds a refcnt to the cluster, keeping
   624	 *    the cluster itself valid.
   625	 *
   626	 * We will exit the function with the cluster re-locked.
   627	 */
   628	static void vswap_free(struct vswap_cluster *cluster, struct swp_desc *desc,
   629		swp_entry_t entry)
   630	{
   631		/* Clear shadow if present */
   632		if (xa_is_value(desc->shadow))
   633			desc->shadow = NULL;
   634		spin_unlock(&cluster->lock);
   635	
   636		release_backing(entry, 1);
 > 637		mem_cgroup_clear_swap(entry, 1);
   638	
   639		/* erase forward mapping and release the virtual slot for reallocation */
   640		spin_lock(&cluster->lock);
   641		release_vswap_slot(cluster, entry.val);
   642	}
   643	
   644	/**
   645	 * folio_alloc_swap - allocate virtual swap space for a folio.
   646	 * @folio: the folio.
   647	 *
   648	 * Return: 0, if the allocation succeeded, -ENOMEM, if the allocation failed.
   649	 */
   650	int folio_alloc_swap(struct folio *folio)
   651	{
   652		swp_entry_t entry;
   653	
   654		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
   655		VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
   656	
   657		entry = vswap_alloc(folio);
   658		if (!entry.val)
   659			return -ENOMEM;
   660	
 > 661		mem_cgroup_record_swap(folio, entry);
   662		swap_cache_add_folio(folio, entry, NULL);
   663	
   664		return 0;
   665	}
   666	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Re: [PATCH v3 18/20] memcg: swap: only charge physical swap slots
Posted by kernel test robot 2 days, 4 hours ago
Hi Nhat,

kernel test robot noticed the following build errors:

[auto build test ERROR on linus/master]
[also build test ERROR on v6.19]
[cannot apply to akpm-mm/mm-everything tj-cgroup/for-next tip/smp/core next-20260205]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Nhat-Pham/swap-rearrange-the-swap-header-file/20260209-065842
base:   linus/master
patch link:    https://lore.kernel.org/r/20260208215839.87595-19-nphamcs%40gmail.com
patch subject: [PATCH v3 18/20] memcg: swap: only charge physical swap slots
config: hexagon-randconfig-001-20260209 (https://download.01.org/0day-ci/archive/20260209/202602090941.opY2jzUD-lkp@intel.com/config)
compiler: clang version 16.0.6 (https://github.com/llvm/llvm-project 7cbf1a2591520c2491aa35339f227775f4d3adf6)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260209/202602090941.opY2jzUD-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602090941.opY2jzUD-lkp@intel.com/

All errors (new ones prefixed by >>):

>> mm/memcontrol-v1.c:688:3: error: call to undeclared function 'mem_cgroup_clear_swap'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
                   mem_cgroup_clear_swap(entry, nr_pages);
                   ^
   mm/memcontrol-v1.c:688:3: note: did you mean 'mem_cgroup_uncharge_swap'?
   include/linux/swap.h:658:20: note: 'mem_cgroup_uncharge_swap' declared here
   static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
                      ^
   1 error generated.


vim +/mem_cgroup_clear_swap +688 mm/memcontrol-v1.c

   651	
   652	/*
   653	 * memcg1_swapin - uncharge swap slot
   654	 * @entry: the first swap entry for which the pages are charged
   655	 * @nr_pages: number of pages which will be uncharged
   656	 *
   657	 * Call this function after successfully adding the charged page to swapcache.
   658	 *
   659	 * Note: This function assumes the page for which swap slot is being uncharged
   660	 * is order 0 page.
   661	 */
   662	void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
   663	{
   664		/*
   665		 * Cgroup1's unified memory+swap counter has been charged with the
   666		 * new swapcache page, finish the transfer by uncharging the swap
   667		 * slot. The swap slot would also get uncharged when it dies, but
   668		 * it can stick around indefinitely and we'd count the page twice
   669		 * the entire time.
   670		 *
   671		 * Cgroup2 has separate resource counters for memory and swap,
   672		 * so this is a non-issue here. Memory and swap charge lifetimes
   673		 * correspond 1:1 to page and swap slot lifetimes: we charge the
   674		 * page to memory here, and uncharge swap when the slot is freed.
   675		 */
   676		if (do_memsw_account()) {
   677			/*
   678			 * The swap entry might not get freed for a long time,
   679			 * let's not wait for it.  The page already received a
   680			 * memory+swap charge, drop the swap entry duplicate.
   681			 */
   682			mem_cgroup_uncharge_swap(entry, nr_pages);
   683	
   684			/*
   685			 * Clear the cgroup association now to prevent double memsw
   686			 * uncharging when the backends are released later.
   687			 */
 > 688			mem_cgroup_clear_swap(entry, nr_pages);
   689		}
   690	}
   691	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
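
Both reports point at the same root cause: the new helpers are declared
only under CONFIG_MEMCG && CONFIG_SWAP, so configurations with memcg or
swap disabled hit implicit-declaration errors at the new call sites. A
minimal sketch of the likely fix, assuming the stub branch of
include/linux/swap.h follows its existing pattern (next to the
mem_cgroup_uncharge_swap() stub the robot points at), is a pair of no-op
inline stubs:

	/* Sketch only: no-op stubs for !CONFIG_MEMCG || !CONFIG_SWAP builds. */
	static inline void mem_cgroup_record_swap(struct folio *folio,
			swp_entry_t entry)
	{
	}

	static inline void mem_cgroup_clear_swap(swp_entry_t entry,
			unsigned int nr_pages)
	{
	}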