This patch refactors __filemap_add_folio() to extract its core
critical section logic into a new helper function,
__filemap_add_folio_xa_locked(). The refactoring maintains the
existing functionality while enabling finer control over locking
granularity for callers.
Key changes:
- Move the xarray insertion logic from __filemap_add_folio() into
__filemap_add_folio_xa_locked()
- Modify __filemap_add_folio() to accept a pre-initialized xa_state
and an 'xa_locked' parameter
- Update the function signature in the header file accordingly
- Adjust existing callers (filemap_add_folio() and
hugetlb_add_to_page_cache()) to use the new interface
The refactoring is functionally equivalent to the previous code:
- When 'xa_locked' is false, __filemap_add_folio() acquires the xarray
lock internally (existing behavior)
- When 'xa_locked' is true, the caller is responsible for holding the
xarray lock, and __filemap_add_folio() only executes the critical
section
This separation prepares for the subsequent patch that introduces
batch folio insertion, where multiple folios can be added to the
page cache within a single lock hold.
No performance changes are expected from this patch alone, as it
only reorganizes code without altering the execution flow.
Reported-by: Gang Deng <gang.deng@intel.com>
Reviewed-by: Tianyou Li <tianyou.li@intel.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Zhiguo Zhou <zhiguo.zhou@intel.com>
---
include/linux/pagemap.h | 2 +-
mm/filemap.c | 173 +++++++++++++++++++++++-----------------
mm/hugetlb.c | 3 +-
3 files changed, 103 insertions(+), 75 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 31a848485ad9..59cbf57fb55b 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1297,7 +1297,7 @@ loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
- pgoff_t index, gfp_t gfp, void **shadowp);
+ struct xa_state *xas, gfp_t gfp, void **shadowp, bool xa_locked);
bool filemap_range_has_writeback(struct address_space *mapping,
loff_t start_byte, loff_t end_byte);
diff --git a/mm/filemap.c b/mm/filemap.c
index ebd75684cb0a..eb9e28e5cbd7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -845,95 +845,114 @@ void replace_page_cache_folio(struct folio *old, struct folio *new)
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);
-noinline int __filemap_add_folio(struct address_space *mapping,
- struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
+/*
+ * The critical section for storing a folio in an XArray.
+ * Context: Expects xas->xa->xa_lock to be held.
+ */
+static void __filemap_add_folio_xa_locked(struct xa_state *xas,
+ struct address_space *mapping, struct folio *folio, void **shadowp)
{
- XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
bool huge;
long nr;
unsigned int forder = folio_order(folio);
+ int order = -1;
+ void *entry, *old = NULL;
- VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
- VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
- VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
- folio);
- mapping_set_update(&xas, mapping);
+	lockdep_assert_held(&xas->xa->xa_lock);
- VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
huge = folio_test_hugetlb(folio);
nr = folio_nr_pages(folio);
- gfp &= GFP_RECLAIM_MASK;
- folio_ref_add(folio, nr);
- folio->mapping = mapping;
- folio->index = xas.xa_index;
-
- for (;;) {
- int order = -1;
- void *entry, *old = NULL;
-
- xas_lock_irq(&xas);
- xas_for_each_conflict(&xas, entry) {
- old = entry;
- if (!xa_is_value(entry)) {
- xas_set_err(&xas, -EEXIST);
- goto unlock;
- }
- /*
- * If a larger entry exists,
- * it will be the first and only entry iterated.
- */
- if (order == -1)
- order = xas_get_order(&xas);
+ xas_for_each_conflict(xas, entry) {
+ old = entry;
+ if (!xa_is_value(entry)) {
+ xas_set_err(xas, -EEXIST);
+ return;
}
+ /*
+ * If a larger entry exists,
+ * it will be the first and only entry iterated.
+ */
+ if (order == -1)
+ order = xas_get_order(xas);
+ }
- if (old) {
- if (order > 0 && order > forder) {
- unsigned int split_order = max(forder,
- xas_try_split_min_order(order));
-
- /* How to handle large swap entries? */
- BUG_ON(shmem_mapping(mapping));
-
- while (order > forder) {
- xas_set_order(&xas, index, split_order);
- xas_try_split(&xas, old, order);
- if (xas_error(&xas))
- goto unlock;
- order = split_order;
- split_order =
- max(xas_try_split_min_order(
- split_order),
- forder);
- }
- xas_reset(&xas);
+ if (old) {
+ if (order > 0 && order > forder) {
+ unsigned int split_order = max(forder,
+ xas_try_split_min_order(order));
+
+ /* How to handle large swap entries? */
+ BUG_ON(shmem_mapping(mapping));
+
+ while (order > forder) {
+ xas_set_order(xas, xas->xa_index, split_order);
+ xas_try_split(xas, old, order);
+ if (xas_error(xas))
+ return;
+ order = split_order;
+ split_order =
+ max(xas_try_split_min_order(
+ split_order),
+ forder);
}
- if (shadowp)
- *shadowp = old;
+ xas_reset(xas);
}
+ if (shadowp)
+ *shadowp = old;
+ }
- xas_store(&xas, folio);
- if (xas_error(&xas))
- goto unlock;
+ xas_store(xas, folio);
+ if (xas_error(xas))
+ return;
- mapping->nrpages += nr;
+ mapping->nrpages += nr;
- /* hugetlb pages do not participate in page cache accounting */
- if (!huge) {
- lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
- if (folio_test_pmd_mappable(folio))
- lruvec_stat_mod_folio(folio,
- NR_FILE_THPS, nr);
- }
+ /* hugetlb pages do not participate in page cache accounting */
+ if (!huge) {
+ lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+ if (folio_test_pmd_mappable(folio))
+ lruvec_stat_mod_folio(folio,
+ NR_FILE_THPS, nr);
+ }
+}
-unlock:
- xas_unlock_irq(&xas);
+noinline int __filemap_add_folio(struct address_space *mapping,
+ struct folio *folio, struct xa_state *xas,
+ gfp_t gfp, void **shadowp, bool xa_locked)
+{
+ long nr;
- if (!xas_nomem(&xas, gfp))
- break;
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
+ folio);
+ mapping_set_update(xas, mapping);
+
+ VM_BUG_ON_FOLIO(xas->xa_index & (folio_nr_pages(folio) - 1), folio);
+ nr = folio_nr_pages(folio);
+
+ gfp &= GFP_RECLAIM_MASK;
+ folio_ref_add(folio, nr);
+ folio->mapping = mapping;
+ folio->index = xas->xa_index;
+
+ if (xa_locked) {
+		lockdep_assert_held(&xas->xa->xa_lock);
+ __filemap_add_folio_xa_locked(xas, mapping, folio, shadowp);
+ } else {
+		lockdep_assert_not_held(&xas->xa->xa_lock);
+ for (;;) {
+ xas_lock_irq(xas);
+ __filemap_add_folio_xa_locked(xas, mapping, folio, shadowp);
+ xas_unlock_irq(xas);
+
+ if (!xas_nomem(xas, gfp))
+ break;
+ }
}
- if (xas_error(&xas))
+ if (xas_error(xas))
goto error;
trace_mm_filemap_add_to_page_cache(folio);
@@ -942,12 +961,12 @@ noinline int __filemap_add_folio(struct address_space *mapping,
folio->mapping = NULL;
/* Leave folio->index set: truncation relies upon it */
folio_put_refs(folio, nr);
- return xas_error(&xas);
+ return xas_error(xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
-int filemap_add_folio(struct address_space *mapping, struct folio *folio,
- pgoff_t index, gfp_t gfp)
+static int _filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ struct xa_state *xas, gfp_t gfp, bool xa_locked)
{
void *shadow = NULL;
int ret;
@@ -963,7 +982,7 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
return ret;
__folio_set_locked(folio);
- ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
+ ret = __filemap_add_folio(mapping, folio, xas, gfp, &shadow, xa_locked);
if (unlikely(ret)) {
mem_cgroup_uncharge(folio);
__folio_clear_locked(folio);
@@ -987,6 +1006,14 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
}
return ret;
}
+
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp)
+{
+ XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
+
+ return _filemap_add_folio(mapping, folio, &xas, gfp, false);
+}
EXPORT_SYMBOL_GPL(filemap_add_folio);
#ifdef CONFIG_NUMA
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51273baec9e5..5c6c6b9e463f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5657,10 +5657,11 @@ int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping
struct inode *inode = mapping->host;
struct hstate *h = hstate_inode(inode);
int err;
+ XA_STATE_ORDER(xas, &mapping->i_pages, idx, folio_order(folio));
idx <<= huge_page_order(h);
__folio_set_locked(folio);
- err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+ err = __filemap_add_folio(mapping, folio, &xas, GFP_KERNEL, NULL, false);
if (unlikely(err)) {
__folio_clear_locked(folio);
--
2.43.0
Hi Zhiguo,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Zhiguo-Zhou/mm-filemap-refactor-__filemap_add_folio-to-separate-critical-section/20260119-143737
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20260119065027.918085-2-zhiguo.zhou%40intel.com
patch subject: [PATCH 1/2] mm/filemap: refactor __filemap_add_folio to separate critical section
config: s390-randconfig-002-20260119 (https://download.01.org/0day-ci/archive/20260119/202601191620.O1a0T02o-lkp@intel.com/config)
compiler: clang version 22.0.0git (https://github.com/llvm/llvm-project 9b8addffa70cee5b2acc5454712d9cf78ce45710)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260119/202601191620.O1a0T02o-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202601191620.O1a0T02o-lkp@intel.com/
All errors (new ones prefixed by >>):
>> mm/filemap.c:861:2: error: member reference type 'spinlock_t' (aka 'struct spinlock') is not a pointer; did you mean to use '.'?
861 | lockdep_assert_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:285:17: note: expanded from macro 'lockdep_assert_held'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:252:52: note: expanded from macro 'lockdep_is_held'
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^
include/linux/lockdep.h:279:32: note: expanded from macro 'lockdep_assert'
279 | do { WARN_ON(debug_locks && !(cond)); } while (0)
| ~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~
include/asm-generic/bug.h:110:25: note: expanded from macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
>> mm/filemap.c:861:2: error: cannot take the address of an rvalue of type 'struct lockdep_map'
861 | lockdep_assert_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:285:17: note: expanded from macro 'lockdep_assert_held'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:252:45: note: expanded from macro 'lockdep_is_held'
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^
include/linux/lockdep.h:279:32: note: expanded from macro 'lockdep_assert'
279 | do { WARN_ON(debug_locks && !(cond)); } while (0)
| ~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~
include/asm-generic/bug.h:110:25: note: expanded from macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
mm/filemap.c:941:3: error: member reference type 'spinlock_t' (aka 'struct spinlock') is not a pointer; did you mean to use '.'?
941 | lockdep_assert_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:285:17: note: expanded from macro 'lockdep_assert_held'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:252:52: note: expanded from macro 'lockdep_is_held'
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^
include/linux/lockdep.h:279:32: note: expanded from macro 'lockdep_assert'
279 | do { WARN_ON(debug_locks && !(cond)); } while (0)
| ~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~
include/asm-generic/bug.h:110:25: note: expanded from macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
mm/filemap.c:941:3: error: cannot take the address of an rvalue of type 'struct lockdep_map'
941 | lockdep_assert_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:285:17: note: expanded from macro 'lockdep_assert_held'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:252:45: note: expanded from macro 'lockdep_is_held'
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^
include/linux/lockdep.h:279:32: note: expanded from macro 'lockdep_assert'
279 | do { WARN_ON(debug_locks && !(cond)); } while (0)
| ~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~
include/asm-generic/bug.h:110:25: note: expanded from macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
mm/filemap.c:944:3: error: member reference type 'spinlock_t' (aka 'struct spinlock') is not a pointer; did you mean to use '.'?
944 | lockdep_assert_not_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:288:17: note: expanded from macro 'lockdep_assert_not_held'
288 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
| ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:252:52: note: expanded from macro 'lockdep_is_held'
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^
include/linux/lockdep.h:279:32: note: expanded from macro 'lockdep_assert'
279 | do { WARN_ON(debug_locks && !(cond)); } while (0)
| ~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~
include/asm-generic/bug.h:110:25: note: expanded from macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
mm/filemap.c:944:3: error: cannot take the address of an rvalue of type 'struct lockdep_map'
944 | lockdep_assert_not_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:288:17: note: expanded from macro 'lockdep_assert_not_held'
288 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
| ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/lockdep.h:252:45: note: expanded from macro 'lockdep_is_held'
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^
include/linux/lockdep.h:279:32: note: expanded from macro 'lockdep_assert'
279 | do { WARN_ON(debug_locks && !(cond)); } while (0)
| ~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~
include/asm-generic/bug.h:110:25: note: expanded from macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
6 errors generated.
vim +861 mm/filemap.c
847
848 /*
849 * The critical section for storing a folio in an XArray.
850 * Context: Expects xas->xa->xa_lock to be held.
851 */
852 static void __filemap_add_folio_xa_locked(struct xa_state *xas,
853 struct address_space *mapping, struct folio *folio, void **shadowp)
854 {
855 bool huge;
856 long nr;
857 unsigned int forder = folio_order(folio);
858 int order = -1;
859 void *entry, *old = NULL;
860
> 861 lockdep_assert_held(xas->xa->xa_lock);
862
863 huge = folio_test_hugetlb(folio);
864 nr = folio_nr_pages(folio);
865
866 xas_for_each_conflict(xas, entry) {
867 old = entry;
868 if (!xa_is_value(entry)) {
869 xas_set_err(xas, -EEXIST);
870 return;
871 }
872 /*
873 * If a larger entry exists,
874 * it will be the first and only entry iterated.
875 */
876 if (order == -1)
877 order = xas_get_order(xas);
878 }
879
880 if (old) {
881 if (order > 0 && order > forder) {
882 unsigned int split_order = max(forder,
883 xas_try_split_min_order(order));
884
885 /* How to handle large swap entries? */
886 BUG_ON(shmem_mapping(mapping));
887
888 while (order > forder) {
889 xas_set_order(xas, xas->xa_index, split_order);
890 xas_try_split(xas, old, order);
891 if (xas_error(xas))
892 return;
893 order = split_order;
894 split_order =
895 max(xas_try_split_min_order(
896 split_order),
897 forder);
898 }
899 xas_reset(xas);
900 }
901 if (shadowp)
902 *shadowp = old;
903 }
904
905 xas_store(xas, folio);
906 if (xas_error(xas))
907 return;
908
909 mapping->nrpages += nr;
910
911 /* hugetlb pages do not participate in page cache accounting */
912 if (!huge) {
913 lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
914 if (folio_test_pmd_mappable(folio))
915 lruvec_stat_mod_folio(folio,
916 NR_FILE_THPS, nr);
917 }
918 }
919
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Hi Zhiguo,
kernel test robot noticed the following build errors:
[auto build test ERROR on akpm-mm/mm-everything]
url: https://github.com/intel-lab-lkp/linux/commits/Zhiguo-Zhou/mm-filemap-refactor-__filemap_add_folio-to-separate-critical-section/20260119-143737
base: https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link: https://lore.kernel.org/r/20260119065027.918085-2-zhiguo.zhou%40intel.com
patch subject: [PATCH 1/2] mm/filemap: refactor __filemap_add_folio to separate critical section
config: i386-randconfig-002-20260119 (https://download.01.org/0day-ci/archive/20260119/202601191644.IqmJBjDM-lkp@intel.com/config)
compiler: gcc-13 (Debian 13.3.0-16) 13.3.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260119/202601191644.IqmJBjDM-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202601191644.IqmJBjDM-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from arch/x86/include/asm/bug.h:193,
from arch/x86/include/asm/alternative.h:9,
from arch/x86/include/asm/barrier.h:5,
from include/asm-generic/bitops/generic-non-atomic.h:7,
from include/linux/bitops.h:28,
from include/linux/log2.h:12,
from arch/x86/include/asm/div64.h:8,
from include/linux/math.h:6,
from include/linux/math64.h:6,
from include/linux/time.h:6,
from include/linux/stat.h:19,
from include/linux/fs_dirent.h:5,
from include/linux/fs/super_types.h:5,
from include/linux/fs/super.h:5,
from include/linux/fs.h:5,
from include/linux/dax.h:5,
from mm/filemap.c:15:
mm/filemap.c: In function '__filemap_add_folio_xa_locked':
>> include/linux/lockdep.h:252:61: error: invalid type argument of '->' (have 'spinlock_t' {aka 'struct spinlock'})
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^~
include/asm-generic/bug.h:110:32: note: in definition of macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
include/linux/lockdep.h:285:9: note: in expansion of macro 'lockdep_assert'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ^~~~~~~~~~~~~~
include/linux/lockdep.h:285:24: note: in expansion of macro 'lockdep_is_held'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ^~~~~~~~~~~~~~~
mm/filemap.c:861:9: note: in expansion of macro 'lockdep_assert_held'
861 | lockdep_assert_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~
mm/filemap.c: In function '__filemap_add_folio':
>> include/linux/lockdep.h:252:61: error: invalid type argument of '->' (have 'spinlock_t' {aka 'struct spinlock'})
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^~
include/asm-generic/bug.h:110:32: note: in definition of macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
include/linux/lockdep.h:285:9: note: in expansion of macro 'lockdep_assert'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ^~~~~~~~~~~~~~
include/linux/lockdep.h:285:24: note: in expansion of macro 'lockdep_is_held'
285 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
| ^~~~~~~~~~~~~~~
mm/filemap.c:941:17: note: in expansion of macro 'lockdep_assert_held'
941 | lockdep_assert_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~
>> include/linux/lockdep.h:252:61: error: invalid type argument of '->' (have 'spinlock_t' {aka 'struct spinlock'})
252 | #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
| ^~
include/asm-generic/bug.h:110:32: note: in definition of macro 'WARN_ON'
110 | int __ret_warn_on = !!(condition); \
| ^~~~~~~~~
include/linux/lockdep.h:288:9: note: in expansion of macro 'lockdep_assert'
288 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
| ^~~~~~~~~~~~~~
include/linux/lockdep.h:288:24: note: in expansion of macro 'lockdep_is_held'
288 | lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
| ^~~~~~~~~~~~~~~
mm/filemap.c:944:17: note: in expansion of macro 'lockdep_assert_not_held'
944 | lockdep_assert_not_held(xas->xa->xa_lock);
| ^~~~~~~~~~~~~~~~~~~~~~~
vim +252 include/linux/lockdep.h
f607c668577481 Peter Zijlstra 2009-07-20 251
f8319483f57f1c Peter Zijlstra 2016-11-30 @252 #define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
f8319483f57f1c Peter Zijlstra 2016-11-30 253 #define lockdep_is_held_type(lock, r) lock_is_held_type(&(lock)->dep_map, (r))
f607c668577481 Peter Zijlstra 2009-07-20 254
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
© 2016 - 2026 Red Hat, Inc.