In order to implement preemptible object mapping we need a zspage lock
that satisfies several preconditions:
- it should be a reader-writer type of lock
- it should be possible to hold it from any context, but also be
  preemptible if the context allows it
- we never sleep while acquiring it, but can sleep while holding it in
  read mode

An rwsemaphore doesn't suffice due to the atomicity requirement, and an
rwlock doesn't satisfy the reader-preemptibility requirement. It's also
worth mentioning that a per-zspage rwsem is a little too memory heavy
(we can easily end up with double-digit megabytes spent on rwsemaphores
alone).

Switch over from rwlock_t to a custom spinlock-based reader-writer lock
that satisfies all of the preconditions.

The spinlock-based zspage_lock was suggested by Hillf Danton.

Suggested-by: Hillf Danton <hdanton@sina.com>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
mm/zsmalloc.c | 246 +++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 192 insertions(+), 54 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2e338cde0d21..bc679a3e1718 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -226,6 +226,9 @@ struct zs_pool {
/* protect zspage migration/compaction */
rwlock_t lock;
atomic_t compaction_in_progress;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_class;
+#endif
};
static inline void zpdesc_set_first(struct zpdesc *zpdesc)
@@ -257,6 +260,18 @@ static inline void free_zpdesc(struct zpdesc *zpdesc)
__free_page(page);
}
+#define ZS_PAGE_UNLOCKED 0
+#define ZS_PAGE_WRLOCKED -1
+
+struct zspage_lock {
+ spinlock_t lock;
+ int cnt;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
struct zspage {
struct {
unsigned int huge:HUGE_BITS;
@@ -269,7 +284,7 @@ struct zspage {
struct zpdesc *first_zpdesc;
struct list_head list; /* fullness list */
struct zs_pool *pool;
- rwlock_t lock;
+ struct zspage_lock zsl;
};
struct mapping_area {
@@ -279,6 +294,148 @@ struct mapping_area {
enum zs_mapmode vm_mm; /* mapping mode */
};
+static void zspage_lock_init(struct zspage *zspage)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_init_map(&zspage->zsl.dep_map, "zspage->lock",
+ &zspage->pool->lock_class, 0);
+#endif
+
+ spin_lock_init(&zspage->zsl.lock);
+ zspage->zsl.cnt = ZS_PAGE_UNLOCKED;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void __read_lock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_);
+
+ spin_lock(&zsl->lock);
+ zsl->cnt++;
+ spin_unlock(&zsl->lock);
+
+ lock_acquired(&zsl->dep_map, _RET_IP_);
+}
+
+static inline void __read_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_release(&zsl->dep_map, _RET_IP_);
+
+ spin_lock(&zsl->lock);
+ zsl->cnt--;
+ spin_unlock(&zsl->lock);
+}
+
+static inline bool __write_trylock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ if (zsl->cnt == ZS_PAGE_UNLOCKED) {
+ zsl->cnt = ZS_PAGE_WRLOCKED;
+ rwsem_acquire(&zsl->dep_map, 0, 1, _RET_IP_);
+ lock_acquired(&zsl->dep_map, _RET_IP_);
+ return true;
+ }
+
+ lock_contended(&zsl->dep_map, _RET_IP_);
+ spin_unlock(&zsl->lock);
+ return false;
+}
+
+static inline void __write_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ rwsem_release(&zsl->dep_map, _RET_IP_);
+
+ zsl->cnt = ZS_PAGE_UNLOCKED;
+ spin_unlock(&zsl->lock);
+}
+#else
+static inline void __read_lock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ zsl->cnt++;
+ spin_unlock(&zsl->lock);
+}
+
+static inline void __read_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ zsl->cnt--;
+ spin_unlock(&zsl->lock);
+}
+
+static inline bool __write_trylock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ spin_lock(&zsl->lock);
+ if (zsl->cnt == ZS_PAGE_UNLOCKED) {
+ zsl->cnt = ZS_PAGE_WRLOCKED;
+ return true;
+ }
+
+ spin_unlock(&zsl->lock);
+ return false;
+}
+
+static inline void __write_unlock(struct zspage *zspage)
+{
+ struct zspage_lock *zsl = &zspage->zsl;
+
+ zsl->cnt = ZS_PAGE_UNLOCKED;
+ spin_unlock(&zsl->lock);
+}
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+/*
+ * The zspage lock can be held from atomic contexts, but it needs to remain
+ * preemptible when held for reading because it remains held outside of those
+ * atomic contexts, otherwise we unnecessarily lose preemptibility.
+ *
+ * To achieve this, the following rules are enforced on readers and writers:
+ *
+ * - Writers are blocked by both writers and readers, while readers are only
+ * blocked by writers (i.e. normal rwlock semantics).
+ *
+ * - Writers are always atomic (to allow readers to spin waiting for them).
+ *
+ * - Writers always use trylock (as the lock may be held by sleeping readers).
+ *
+ * - Readers may spin on the lock (as they can only wait for atomic writers).
+ *
+ * - Readers may sleep while holding the lock (as writes only use trylock).
+ */
+static void zspage_read_lock(struct zspage *zspage)
+{
+ return __read_lock(zspage);
+}
+
+static void zspage_read_unlock(struct zspage *zspage)
+{
+ return __read_unlock(zspage);
+}
+
+static __must_check bool zspage_write_trylock(struct zspage *zspage)
+{
+ return __write_trylock(zspage);
+}
+
+static void zspage_write_unlock(struct zspage *zspage)
+{
+ return __write_unlock(zspage);
+}
+
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetZsHugePage(struct zspage *zspage)
{
@@ -290,12 +447,6 @@ static bool ZsHugePage(struct zspage *zspage)
return zspage->huge;
}
-static void migrate_lock_init(struct zspage *zspage);
-static void migrate_read_lock(struct zspage *zspage);
-static void migrate_read_unlock(struct zspage *zspage);
-static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_unlock(struct zspage *zspage);
-
#ifdef CONFIG_COMPACTION
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
@@ -992,7 +1143,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
return NULL;
zspage->magic = ZSPAGE_MAGIC;
- migrate_lock_init(zspage);
+ zspage->pool = pool;
+ zspage->class = class->index;
+ zspage_lock_init(zspage);
for (i = 0; i < class->pages_per_zspage; i++) {
struct zpdesc *zpdesc;
@@ -1015,8 +1168,6 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
create_page_chain(class, zspage, zpdescs);
init_zspage(class, zspage);
- zspage->pool = pool;
- zspage->class = class->index;
return zspage;
}
@@ -1217,7 +1368,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
* zs_unmap_object API so delegate the locking from class to zspage
* which is smaller granularity.
*/
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
read_unlock(&pool->lock);
class = zspage_class(pool, zspage);
@@ -1277,7 +1428,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
}
local_unlock(&zs_map_area.lock);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
@@ -1671,18 +1822,18 @@ static void lock_zspage(struct zspage *zspage)
/*
* Pages we haven't locked yet can be migrated off the list while we're
* trying to lock them, so we need to be careful and only attempt to
- * lock each page under migrate_read_lock(). Otherwise, the page we lock
+ * lock each page under zspage_read_lock(). Otherwise, the page we lock
* may no longer belong to the zspage. This means that we may wait for
* the wrong page to unlock, so we must take a reference to the page
- * prior to waiting for it to unlock outside migrate_read_lock().
+ * prior to waiting for it to unlock outside zspage_read_lock().
*/
while (1) {
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
zpdesc = get_first_zpdesc(zspage);
if (zpdesc_trylock(zpdesc))
break;
zpdesc_get(zpdesc);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
}
@@ -1693,41 +1844,16 @@ static void lock_zspage(struct zspage *zspage)
curr_zpdesc = zpdesc;
} else {
zpdesc_get(zpdesc);
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
zpdesc_wait_locked(zpdesc);
zpdesc_put(zpdesc);
- migrate_read_lock(zspage);
+ zspage_read_lock(zspage);
}
}
- migrate_read_unlock(zspage);
+ zspage_read_unlock(zspage);
}
#endif /* CONFIG_COMPACTION */
-static void migrate_lock_init(struct zspage *zspage)
-{
- rwlock_init(&zspage->lock);
-}
-
-static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
-{
- read_lock(&zspage->lock);
-}
-
-static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
-{
- read_unlock(&zspage->lock);
-}
-
-static void migrate_write_lock(struct zspage *zspage)
-{
- write_lock(&zspage->lock);
-}
-
-static void migrate_write_unlock(struct zspage *zspage)
-{
- write_unlock(&zspage->lock);
-}
-
#ifdef CONFIG_COMPACTION
static const struct movable_operations zsmalloc_mops;
@@ -1769,7 +1895,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
}
static int zs_page_migrate(struct page *newpage, struct page *page,
- enum migrate_mode mode)
+ enum migrate_mode mode)
{
struct zs_pool *pool;
struct size_class *class;
@@ -1785,9 +1911,6 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc));
- /* We're committed, tell the world that this is a Zsmalloc page. */
- __zpdesc_set_zsmalloc(newzpdesc);
-
/* The page is locked, so this pointer must remain valid */
zspage = get_zspage(zpdesc);
pool = zspage->pool;
@@ -1803,8 +1926,15 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
* the class lock protects zpage alloc/free in the zspage.
*/
spin_lock(&class->lock);
- /* the migrate_write_lock protects zpage access via zs_map_object */
- migrate_write_lock(zspage);
+ /* the zspage write_lock protects zpage access via zs_map_object */
+ if (!zspage_write_trylock(zspage)) {
+ spin_unlock(&class->lock);
+ write_unlock(&pool->lock);
+ return -EINVAL;
+ }
+
+ /* We're committed, tell the world that this is a Zsmalloc page. */
+ __zpdesc_set_zsmalloc(newzpdesc);
offset = get_first_obj_offset(zpdesc);
s_addr = kmap_local_zpdesc(zpdesc);
@@ -1835,7 +1965,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
*/
write_unlock(&pool->lock);
spin_unlock(&class->lock);
- migrate_write_unlock(zspage);
+ zspage_write_unlock(zspage);
zpdesc_get(newzpdesc);
if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) {
@@ -1971,9 +2101,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
if (!src_zspage)
break;
- migrate_write_lock(src_zspage);
+ if (!zspage_write_trylock(src_zspage))
+ break;
+
migrate_zspage(pool, src_zspage, dst_zspage);
- migrate_write_unlock(src_zspage);
+ zspage_write_unlock(src_zspage);
fg = putback_zspage(class, src_zspage);
if (fg == ZS_INUSE_RATIO_0) {
@@ -2233,7 +2365,9 @@ struct zs_pool *zs_create_pool(const char *name)
* trigger compaction manually. Thus, ignore return code.
*/
zs_register_shrinker(pool);
-
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_register_key(&pool->lock_class);
+#endif
return pool;
err:
@@ -2270,6 +2404,10 @@ void zs_destroy_pool(struct zs_pool *pool)
kfree(class);
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_unregister_key(&pool->lock_class);
+#endif
+
destroy_cache(pool);
kfree(pool->name);
kfree(pool);
--
2.48.1.601.g30ceb7b040-goog
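For anyone who wants to play with the locking scheme outside of the kernel,
here is a minimal userspace model of the zspage_lock idea from the patch
above. It is only a sketch: pthread_mutex_t stands in for spinlock_t, the
zsl_* names are invented for the example, and the lockdep annotations are
not modeled.

/*
 * Userspace model of the zspage_lock scheme: a reader counter protected
 * by an inner lock, with writers using trylock only and keeping the
 * inner lock held for the whole write-side section.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define ZS_PAGE_UNLOCKED  0
#define ZS_PAGE_WRLOCKED -1

struct zsl_model {
        pthread_mutex_t lock;   /* stands in for zsl->lock (spinlock_t) */
        int cnt;                /* ZS_PAGE_WRLOCKED or number of readers */
};

static void zsl_init(struct zsl_model *zsl)
{
        pthread_mutex_init(&zsl->lock, NULL);
        zsl->cnt = ZS_PAGE_UNLOCKED;
}

static void zsl_read_lock(struct zsl_model *zsl)
{
        /* Readers only block while a writer holds the inner lock. */
        pthread_mutex_lock(&zsl->lock);
        zsl->cnt++;
        pthread_mutex_unlock(&zsl->lock);
}

static void zsl_read_unlock(struct zsl_model *zsl)
{
        pthread_mutex_lock(&zsl->lock);
        zsl->cnt--;
        pthread_mutex_unlock(&zsl->lock);
}

static bool zsl_write_trylock(struct zsl_model *zsl)
{
        pthread_mutex_lock(&zsl->lock);
        if (zsl->cnt == ZS_PAGE_UNLOCKED) {
                zsl->cnt = ZS_PAGE_WRLOCKED;
                /* Inner lock stays held until zsl_write_unlock(). */
                return true;
        }
        pthread_mutex_unlock(&zsl->lock);
        return false;
}

static void zsl_write_unlock(struct zsl_model *zsl)
{
        zsl->cnt = ZS_PAGE_UNLOCKED;
        pthread_mutex_unlock(&zsl->lock);
}

int main(void)
{
        struct zsl_model zsl;

        zsl_init(&zsl);
        zsl_read_lock(&zsl);
        /* Fails: a reader is active, so cnt != ZS_PAGE_UNLOCKED. */
        printf("write_trylock with a reader: %d\n", zsl_write_trylock(&zsl));
        zsl_read_unlock(&zsl);
        /* Succeeds: no readers left; the inner lock is now held. */
        printf("write_trylock after read_unlock: %d\n", zsl_write_trylock(&zsl));
        zsl_write_unlock(&zsl);
        return 0;
}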
On Fri, Feb 14, 2025 at 01:50:23PM +0900, Sergey Senozhatsky wrote:
> [..]
> +static void zspage_lock_init(struct zspage *zspage)
> +{
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> + lockdep_init_map(&zspage->zsl.dep_map, "zspage->lock",
> + &zspage->pool->lock_class, 0);
> +#endif
> +
> + spin_lock_init(&zspage->zsl.lock);
> + zspage->zsl.cnt = ZS_PAGE_UNLOCKED;
> +}
> +
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
Instead of the #ifdef and repeating all the functions, can't we do
something like:
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define zspage_lock_map(zsl) (&zsl->dep_map)
#else
#define zspage_lock_map(zsl) /* empty or NULL */
#endif
Then we can just have one version of the functions and use
zspage_lock_map() instead of zsl->dep_map, right?
> +static inline void __read_lock(struct zspage *zspage)
> +{
> + struct zspage_lock *zsl = &zspage->zsl;
> +
> + rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_);
> +
> + spin_lock(&zsl->lock);
> + zsl->cnt++;
Shouldn't we check if the lock is write locked?
> + spin_unlock(&zsl->lock);
> +
> + lock_acquired(&zsl->dep_map, _RET_IP_);
> +}
> [..]
On (25/02/20 19:18), Yosry Ahmed wrote:
[..]
> > +static void zspage_lock_init(struct zspage *zspage)
> > +{
> > +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> > + lockdep_init_map(&zspage->zsl.dep_map, "zspage->lock",
> > + &zspage->pool->lock_class, 0);
> > +#endif
> > +
> > + spin_lock_init(&zspage->zsl.lock);
> > + zspage->zsl.cnt = ZS_PAGE_UNLOCKED;
> > +}
> > +
> > +#ifdef CONFIG_DEBUG_LOCK_ALLOC
>
> Instead of the #ifdef and repeating all the functions, can't we do
> something like:
>
> #ifdef CONFIG_DEBUG_LOCK_ALLOC
> #define zspage_lock_map(zsl) (&zsl->dep_map)
> #else
> #define zspage_lock_map(zsl) /* empty or NULL */
> #endif
>
> Then we can just have one version of the functions and use
> zspage_lock_map() instead of zsl->dep_map, right?
Yeah, maybe.
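A rough, untested sketch of what that could look like (not code from this
series; the zspage_lock_map() name is taken from the suggestion above).
With !CONFIG_DEBUG_LOCK_ALLOC the rwsem_acquire*() and rwsem_release()
annotations compile away, so the NULL map is never used:

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define zspage_lock_map(zsl)    (&(zsl)->dep_map)
#else
#define zspage_lock_map(zsl)    NULL
#endif

/* Single copy of the helpers for debug and non-debug builds alike. */
static inline void __read_lock(struct zspage *zspage)
{
        struct zspage_lock *zsl = &zspage->zsl;

        rwsem_acquire_read(zspage_lock_map(zsl), 0, 0, _RET_IP_);

        spin_lock(&zsl->lock);
        zsl->cnt++;
        spin_unlock(&zsl->lock);

        lock_acquired(zspage_lock_map(zsl), _RET_IP_);
}

static inline void __read_unlock(struct zspage *zspage)
{
        struct zspage_lock *zsl = &zspage->zsl;

        rwsem_release(zspage_lock_map(zsl), _RET_IP_);

        spin_lock(&zsl->lock);
        zsl->cnt--;
        spin_unlock(&zsl->lock);
}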
On Thu, Feb 20, 2025 at 07:18:40PM +0000, Yosry Ahmed wrote:
> [..]
> > +static inline void __read_lock(struct zspage *zspage)
> > +{
> > + struct zspage_lock *zsl = &zspage->zsl;
> > +
> > + rwsem_acquire_read(&zsl->dep_map, 0, 0, _RET_IP_);
> > +
> > + spin_lock(&zsl->lock);
> > + zsl->cnt++;
>
> Shouldn't we check if the lock is write locked?
Never mind, we keep the spinlock held on write locking.
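The interleaving being referred to, roughly, based on the
__write_trylock()/__write_unlock() code in the patch:

/*
 * Why __read_lock() needs no explicit ZS_PAGE_WRLOCKED check: the
 * writer keeps zsl->lock held for its whole critical section.
 *
 *   writer                               reader
 *   ------                               ------
 *   __write_trylock()
 *     spin_lock(&zsl->lock)
 *     cnt = ZS_PAGE_WRLOCKED
 *     (returns true, lock still held)    __read_lock()
 *   ... write-side critical section ...    spin_lock(&zsl->lock)  <- spins
 *   __write_unlock()
 *     cnt = ZS_PAGE_UNLOCKED
 *     spin_unlock(&zsl->lock)              cnt++  <- only now proceeds
 */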