Concurrent modifications of meta table entries are currently handled
by a per-entry spin-lock. This has a number of shortcomings.
First, this imposes atomic requirements on compression backends.
zram can call both zcomp_compress() and zcomp_decompress() under
entry spin-lock, which implies that we can use only compression
algorithms that don't schedule/sleep/wait during compression and
decompression. This, for instance, makes it impossible to use
some ASYNC compression algorithm implementations (H/W
compression, etc.).
Second, this can potentially trigger watchdogs. For example,
entry re-compression with secondary algorithms is performed
under entry spin-lock. Given that we chain secondary
compression algorithms and that some of them can be configured
for best compression ratio (and worst compression speed), zram
can stay under the spin-lock for quite some time.
Having a per-entry mutex (or, for instance, a rw-semaphore)
significantly increases sizeof() of each entry and hence of the
meta table. Therefore entry locking returns to bit locking, as
before, however, this time in a preempt-rt friendly way, because
it waits on the bit instead of spinning on it. Lock owners are
also now permitted to schedule, which is a first step on the
path to making zram non-atomic.
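In a nutshell, with the lockdep annotations stripped, the new slot
lock boils down to the following (a condensed sketch of the code
added by this patch):

	static void zram_slot_lock(struct zram *zram, u32 index)
	{
		unsigned long *lock = &zram->table[index].flags;

		wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
	}

	static void zram_slot_unlock(struct zram *zram, u32 index)
	{
		unsigned long *lock = &zram->table[index].flags;

		clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
	}

wait_on_bit_lock() puts the task on a bit waitqueue and sleeps until
the bit can be taken, while clear_and_wake_up_bit() clears the bit
and wakes up waiters, so contended tasks sleep instead of burning
CPU cycles.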
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
drivers/block/zram/zram_drv.c | 62 ++++++++++++++++++++++++++++-------
drivers/block/zram/zram_drv.h | 20 +++++++----
2 files changed, 65 insertions(+), 17 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9f5020b077c5..37c5651305c2 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -58,19 +58,62 @@ static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_from_zspool(struct zram *zram, struct page *page,
u32 index);
-static int zram_slot_trylock(struct zram *zram, u32 index)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
+#define zram_lock_class(zram) (&(zram)->lock_class)
+#else
+#define slot_dep_map(zram, index) NULL
+#define zram_lock_class(zram) NULL
+#endif
+
+static void zram_slot_lock_init(struct zram *zram, u32 index)
{
- return spin_trylock(&zram->table[index].lock);
+ lockdep_init_map(slot_dep_map(zram, index),
+ "zram->table[index].lock",
+ zram_lock_class(zram), 0);
+}
+
+/*
+ * entry locking rules:
+ *
+ * 1) Lock is exclusive
+ *
+ * 2) lock() function can sleep waiting for the lock
+ *
+ * 3) Lock owner can sleep
+ *
+ * 4) Use TRY lock variant when in atomic context
+ * - must check return value and handle locking failures
+ */
+static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
+{
+ unsigned long *lock = &zram->table[index].flags;
+
+ if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
+ mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
+ return true;
+ }
+
+ lock_contended(slot_dep_map(zram, index), _RET_IP_);
+ return false;
}
static void zram_slot_lock(struct zram *zram, u32 index)
{
- spin_lock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
+ wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
+ lock_acquired(slot_dep_map(zram, index), _RET_IP_);
}
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- spin_unlock(&zram->table[index].lock);
+ unsigned long *lock = &zram->table[index].flags;
+
+ mutex_release(slot_dep_map(zram, index), _RET_IP_);
+ clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
}
static inline bool init_done(struct zram *zram)
@@ -93,7 +136,6 @@ static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
zram->table[index].handle = handle;
}
-/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
enum zram_pageflags flag)
{
@@ -1473,15 +1515,11 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
huge_class_size = zs_huge_class_size(zram->mem_pool);
for (index = 0; index < num_pages; index++)
- spin_lock_init(&zram->table[index].lock);
+ zram_slot_lock_init(zram, index);
+
return true;
}
-/*
- * To protect concurrent access to the same index entry,
- * caller should hold this table index entry's bit_spinlock to
- * indicate this index entry is accessing.
- */
static void zram_free_page(struct zram *zram, size_t index)
{
unsigned long handle;
@@ -2625,6 +2663,7 @@ static int zram_add(void)
if (ret)
goto out_cleanup_disk;
+ lockdep_register_key(zram_lock_class(zram));
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
@@ -2653,6 +2692,7 @@ static int zram_remove(struct zram *zram)
zram->claim = true;
mutex_unlock(&zram->disk->open_mutex);
+ lockdep_unregister_key(zram_lock_class(zram));
zram_debugfs_unregister(zram);
if (claimed) {
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index db78d7c01b9a..794c9234e627 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -28,7 +28,6 @@
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
-
/*
* ZRAM is mainly used for memory efficiency so we want to keep memory
* footprint small and thus squeeze size and zram pageflags into a flags
@@ -46,6 +45,7 @@
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
ZRAM_SAME = ZRAM_FLAG_SHIFT, /* Page consists the same element */
+ ZRAM_ENTRY_LOCK, /* entry access lock bit */
ZRAM_WB, /* page is stored on backing_device */
ZRAM_PP_SLOT, /* Selected for post-processing */
ZRAM_HUGE, /* Incompressible page */
@@ -58,13 +58,18 @@ enum zram_pageflags {
__NR_ZRAM_PAGEFLAGS,
};
-/*-- Data structures */
-
-/* Allocated for each disk page */
+/*
+ * Allocated for each disk page. We use bit-lock (ZRAM_ENTRY_LOCK bit
+ * of flags) to save memory. There can be plenty of entries and standard
+ * locking primitives (e.g. mutex) will significantly increase sizeof()
+ * of each entry and hence of the meta table.
+ */
struct zram_table_entry {
unsigned long handle;
- unsigned int flags;
- spinlock_t lock;
+ unsigned long flags;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
ktime_t ac_time;
#endif
@@ -137,5 +142,8 @@ struct zram {
struct dentry *debugfs_dir;
#endif
atomic_t pp_in_progress;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_class;
+#endif
};
#endif
--
2.48.1.601.g30ceb7b040-goog
On 2025-02-22 07:25:32 [+0900], Sergey Senozhatsky wrote:
…
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 9f5020b077c5..37c5651305c2 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -58,19 +58,62 @@ static void zram_free_page(struct zram *zram, size_t index);
> static int zram_read_from_zspool(struct zram *zram, struct page *page,
> u32 index);
>
> -static int zram_slot_trylock(struct zram *zram, u32 index)
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> +#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
> +#define zram_lock_class(zram) (&(zram)->lock_class)
> +#else
> +#define slot_dep_map(zram, index) NULL
> +#define zram_lock_class(zram) NULL
> +#endif
That CONFIG_DEBUG_LOCK_ALLOC here is not needed because dep_map as well
as lock_class goes away in the !CONFIG_DEBUG_LOCK_ALLOC case.
> +static void zram_slot_lock_init(struct zram *zram, u32 index)
> {
> - return spin_trylock(&zram->table[index].lock);
> + lockdep_init_map(slot_dep_map(zram, index),
> + "zram->table[index].lock",
> + zram_lock_class(zram), 0);
> +}
Why do you need zram_lock_class and slot_dep_map? As far as I can tell, you
init both in the same place and you acquire both in the same place.
Therefore it looks like you tell lockdep that you acquire two locks
while it would be enough to do it with one.
> +/*
> + * entry locking rules:
> + *
> + * 1) Lock is exclusive
> + *
> + * 2) lock() function can sleep waiting for the lock
> + *
> + * 3) Lock owner can sleep
> + *
> + * 4) Use TRY lock variant when in atomic context
> + * - must check return value and handle locking failures
> + */
> +static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
> +{
> + unsigned long *lock = &zram->table[index].flags;
> +
> + if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
> + mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
> + lock_acquired(slot_dep_map(zram, index), _RET_IP_);
> + return true;
> + }
> +
> + lock_contended(slot_dep_map(zram, index), _RET_IP_);
> + return false;
> }
>
> static void zram_slot_lock(struct zram *zram, u32 index)
> {
> - spin_lock(&zram->table[index].lock);
> + unsigned long *lock = &zram->table[index].flags;
> +
> + mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
> + wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
> + lock_acquired(slot_dep_map(zram, index), _RET_IP_);
This looks odd. The first mutex_acquire() can be invoked twice by two
threads, right? The first thread gets both (mutex_acquire() and
lock_acquired()) while the second gets mutex_acquire() and blocks on
wait_on_bit_lock().
> }
>
> static void zram_slot_unlock(struct zram *zram, u32 index)
> {
> - spin_unlock(&zram->table[index].lock);
> + unsigned long *lock = &zram->table[index].flags;
> +
> + mutex_release(slot_dep_map(zram, index), _RET_IP_);
> + clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
> }
>
> static inline bool init_done(struct zram *zram)
…
> diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
> index db78d7c01b9a..794c9234e627 100644
> --- a/drivers/block/zram/zram_drv.h
> +++ b/drivers/block/zram/zram_drv.h
> @@ -58,13 +58,18 @@ enum zram_pageflags {
> __NR_ZRAM_PAGEFLAGS,
> };
>
> -/*-- Data structures */
> -
> -/* Allocated for each disk page */
> +/*
> + * Allocated for each disk page. We use bit-lock (ZRAM_ENTRY_LOCK bit
> + * of flags) to save memory. There can be plenty of entries and standard
> + * locking primitives (e.g. mutex) will significantly increase sizeof()
> + * of each entry and hence of the meta table.
> + */
> struct zram_table_entry {
> unsigned long handle;
> - unsigned int flags;
> - spinlock_t lock;
> + unsigned long flags;
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> + struct lockdep_map dep_map;
> +#endif
> #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
> ktime_t ac_time;
> #endif
> @@ -137,5 +142,8 @@ struct zram {
> struct dentry *debugfs_dir;
> #endif
> atomic_t pp_in_progress;
> +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> + struct lock_class_key lock_class;
> +#endif
As mentioned earlier, no need for CONFIG_DEBUG_LOCK_ALLOC.
> };
> #endif
> --
> 2.48.1.601.g30ceb7b040-goog
>
On (25/02/24 09:19), Sebastian Andrzej Siewior wrote:
> > diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> > index 9f5020b077c5..37c5651305c2 100644
> > --- a/drivers/block/zram/zram_drv.c
> > +++ b/drivers/block/zram/zram_drv.c
> > @@ -58,19 +58,62 @@ static void zram_free_page(struct zram *zram, size_t index);
> > static int zram_read_from_zspool(struct zram *zram, struct page *page,
> > u32 index);
> >
> > -static int zram_slot_trylock(struct zram *zram, u32 index)
> > +#ifdef CONFIG_DEBUG_LOCK_ALLOC
> > +#define slot_dep_map(zram, index) (&(zram)->table[(index)].dep_map)
> > +#define zram_lock_class(zram) (&(zram)->lock_class)
> > +#else
> > +#define slot_dep_map(zram, index) NULL
> > +#define zram_lock_class(zram) NULL
> > +#endif
>
> That CONFIG_DEBUG_LOCK_ALLOC here is not needed because dep_map as well
> as lock_class goes away in the !CONFIG_DEBUG_LOCK_ALLOC case.
Let me give it a try.
> > +static void zram_slot_lock_init(struct zram *zram, u32 index)
> > {
> > - return spin_trylock(&zram->table[index].lock);
> > + lockdep_init_map(slot_dep_map(zram, index),
> > + "zram->table[index].lock",
> > + zram_lock_class(zram), 0);
> > +}
> Why do you need zram_lock_class and slot_dep_map? As far as I can tell, you
> init both in the same place and you acquire both in the same place.
> Therefore it looks like you tell lockdep that you acquire two locks
> while it would be enough to do it with one.
Sorry, I'm not that familiar with lockdep, can you elaborate?
I don't think we can pass NULL as lock-class to lockdep_init_map(),
this should trigger `if (DEBUG_LOCKS_WARN_ON(!key))` as far as I
can tell. I guess it's something else that you are suggesting?
> > static void zram_slot_lock(struct zram *zram, u32 index)
> > {
> > - spin_lock(&zram->table[index].lock);
> > + unsigned long *lock = &zram->table[index].flags;
> > +
> > + mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
> > + wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
> > + lock_acquired(slot_dep_map(zram, index), _RET_IP_);
>
> This looks odd. The first mutex_acquire() can be invoked twice by two
> threads, right? The first thread gets both (mutex_acquire() and
> lock_acquired()) while the second gets mutex_acquire() and blocks on
> wait_on_bit_lock().
Hmm why is this a problem? ... and I'm pretty sure it was you who
suggested to put mutex_acquire() before wait_on_bit_lock() [1] ;)
[1] https://lore.kernel.org/all/20250206073803.c2tiyIq6@linutronix.de/
On 2025-02-25 13:51:31 [+0900], Sergey Senozhatsky wrote:
> > > +static void zram_slot_lock_init(struct zram *zram, u32 index)
> > > {
> > > - return spin_trylock(&zram->table[index].lock);
> > > + lockdep_init_map(slot_dep_map(zram, index),
> > > + "zram->table[index].lock",
> > > + zram_lock_class(zram), 0);
> > > +}
> > Why do you need zram_lock_class and slot_dep_map? As far as I can tell, you
> > init both in the same place and you acquire both in the same place.
> > Therefore it looks like you tell lockdep that you acquire two locks
> > while it would be enough to do it with one.
>
> Sorry, I'm not that familiar with lockdep, can you elaborate?
> I don't think we can pass NULL as lock-class to lockdep_init_map(),
> this should trigger `if (DEBUG_LOCKS_WARN_ON(!key))` as far as I
> can tell. I guess it's something else that you are suggesting?
ach. Got it. What about
| static void zram_slot_lock_init(struct zram *zram, u32 index)
| {
| static struct lock_class_key __key;
|
| lockdep_init_map(slot_dep_map(zram, index),
| "zram->table[index].lock",
| &__key, 0);
| }
So every lock coming from zram belongs to the same class. Otherwise each
lock coming from zram_slot_lock_init() would belong to a different class
and for lockdep it would look like they are different locks. But they
are used always in the same way.
> > > static void zram_slot_lock(struct zram *zram, u32 index)
> > > {
> > > - spin_lock(&zram->table[index].lock);
> > > + unsigned long *lock = &zram->table[index].flags;
> > > +
> > > + mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
> > > + wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
> > > + lock_acquired(slot_dep_map(zram, index), _RET_IP_);
> >
> > This looks odd. The first mutex_acquire() can be invoked twice by two
> > threads, right? The first thread gets both (mutex_acquire() and
> > lock_acquired()) while the second gets mutex_acquire() and blocks on
> > wait_on_bit_lock().
>
> Hmm why is this a problem? ... and I'm pretty sure it was you who
> suggested to put mutex_acquire() before wait_on_bit_lock() [1] ;)
Sure. I was confused that you issue it twice. I didn't notice the d in
lock_acquired(). So you have one for lockdep and one for lockstat. That
is okay ;)
Sebastian
On (25/02/27 13:05), Sebastian Andrzej Siewior wrote:
> > > > +static void zram_slot_lock_init(struct zram *zram, u32 index)
> > > > {
> > > > - return spin_trylock(&zram->table[index].lock);
> > > > + lockdep_init_map(slot_dep_map(zram, index),
> > > > + "zram->table[index].lock",
> > > > + zram_lock_class(zram), 0);
> > > > +}
> > > Why do you need zram_lock_class and slot_dep_map? As far as I can tell, you
> > > init both in the same place and you acquire both in the same place.
> > > Therefore it looks like you tell lockdep that you acquire two locks
> > > while it would be enough to do it with one.
> >
> > Sorry, I'm not that familiar with lockdep, can you elaborate?
> > I don't think we can pass NULL as lock-class to lockdep_init_map(),
> > this should trigger `if (DEBUG_LOCKS_WARN_ON(!key))` as far as I
> > can tell. I guess it's something else that you are suggesting?
>
> ach. Got it. What about
>
> | static void zram_slot_lock_init(struct zram *zram, u32 index)
> | {
> | static struct lock_class_key __key;
> |
> | lockdep_init_map(slot_dep_map(zram, index),
> | "zram->table[index].lock",
> | &__key, 0);
> | }
>
> So every lock coming from zram belongs to the same class. Otherwise each
> lock coming from zram_slot_lock_init() would belong to a different class
> and for lockdep it would look like they are different locks. But they
> are used always in the same way.
I see. I thought that the key was "shared" between zram meta table
entries because the key is per-zram device, which sort of made sense
(we can have different zram devices in a system - one swap, a bunch
mounted with various file-systems on them).
I can do a 'static key', one for all zram devices.
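Presumably something like this, then (a sketch of the planned change,
with the per-device class replaced by one static key shared by all
devices, reusing the slot_dep_map() helper from the patch):

	static void zram_slot_lock_init(struct zram *zram, u32 index)
	{
		/* one lock class for slot locks of all zram devices */
		static struct lock_class_key key;

		lockdep_init_map(slot_dep_map(zram, index),
				 "zram->table[index].lock", &key, 0);
	}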
> > > > static void zram_slot_lock(struct zram *zram, u32 index)
> > > > {
> > > > - spin_lock(&zram->table[index].lock);
> > > > + unsigned long *lock = &zram->table[index].flags;
> > > > +
> > > > + mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
> > > > + wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
> > > > + lock_acquired(slot_dep_map(zram, index), _RET_IP_);
> > >
> > > This looks odd. The first mutex_acquire() can be invoked twice by two
> > > threads, right? The first thread gets both (mutex_acquire() and
> > > lock_acquired()) while the second gets mutex_acquire() and blocks on
> > > wait_on_bit_lock().
> >
> > Hmm why is this a problem? ... and I'm pretty sure it was you who
> > suggested to put mutex_acquire() before wait_on_bit_lock() [1] ;)
>
> Sure. I was confused that you issue it twice. I didn't notice the d in
> lock_acquired(). So you have one for lockdep and one for lockstat. That
> is okay ;)
Cool!
On (25/02/27 21:42), Sergey Senozhatsky wrote:
> > ach. Got it. What about
> >
> > | static void zram_slot_lock_init(struct zram *zram, u32 index)
> > | {
> > | static struct lock_class_key __key;
> > |
> > | lockdep_init_map(slot_dep_map(zram, index),
> > | "zram->table[index].lock",
> > | &__key, 0);
> > | }
> >
> > So every lock coming from zram belongs to the same class. Otherwise each
> > lock coming from zram_slot_lock_init() would belong to a different class
> > and for lockdep it would look like they are different locks. But they
> > are used always in the same way.
>
> I see. I thought that the key was "shared" between zram meta table
> entries because the key is per-zram device, which sort of made sense
> (we can have different zram devices in a system - one swap, a bunch
> mounted with various file-systems on them).
So the lock class is registered dynamically for each zram device
zram_add()
lockdep_register_key(&zram->lock_class);
and then we use that zram->lock_class to init zram->table entries.
We unregister the lock_class during each zram device destruction
zram_remove()
lockdep_unregister_key(&zram->lock_class);
Does this still put zram->table entries into different lock classes?
On 2025-02-27 22:04:16 [+0900], Sergey Senozhatsky wrote:
> On (25/02/27 21:42), Sergey Senozhatsky wrote:
> > > ach. Got it. What about
> > >
> > > | static void zram_slot_lock_init(struct zram *zram, u32 index)
> > > | {
> > > | static struct lock_class_key __key;
> > > |
> > > | lockdep_init_map(slot_dep_map(zram, index),
> > > | "zram->table[index].lock",
> > > | &__key, 0);
> > > | }
> > >
> > > So every lock coming from zram belongs to the same class. Otherwise each
> > > lock coming from zram_slot_lock_init() would belong to a different class
> > > and for lockdep it would look like they are different locks. But they
> > > are used always in the same way.
> >
> > I see. I thought that the key was "shared" between zram meta table
> > entries because the key is per-zram device, which sort of made sense
> > (we can have different zram devices in a system - one swap, a bunch
> > mounted with various file-systems on them).
Yes. So usually you do spin_lock_init() and this creates a key at _this_
very position. So every lock initialized at this position shares the
same class / the same pattern.
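For reference, this is roughly what the CONFIG_DEBUG_SPINLOCK variant
of spin_lock_init() in include/linux/spinlock.h does (simplified,
details vary between kernel versions):

	#define spin_lock_init(lock)					\
	do {								\
		static struct lock_class_key __key;			\
									\
		__raw_spin_lock_init(spinlock_check(lock),		\
				     #lock, &__key, LD_WAIT_CONFIG);	\
	} while (0)

Every lock initialized from one particular spin_lock_init() call site
thus shares the same static key and hence the same lockdep class.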
> So the lock class is registered dynamically for each zram device
>
> zram_add()
> lockdep_register_key(&zram->lock_class);
>
> and then we use that zram->lock_class to init zram->table entries.
>
> We unregister the lock_class during each zram device destruction
>
> zram_remove()
> lockdep_unregister_key(&zram->lock_class);
>
> Does this still put zram->table entries into different lock classes?
You shouldn't need to register and unregister the lock_class. What you
do should match for instance j_trans_commit_map in fs/jbd2/journal.c or
__key in include/linux/rhashtable.h & lib/rhashtable.c.
At least based on my understanding so far.
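The jbd2 variant, for instance, looks roughly like this (trimmed from
fs/jbd2/journal.c):

	/* file scope, so all journals share one lock class */
	static struct lock_class_key jbd2_trans_commit_key;

	/* in journal_init_common() */
	lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
			 &jbd2_trans_commit_key, 0);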
Sebastian
On (25/02/27 14:12), Sebastian Andrzej Siewior wrote:
> > > I see. I thought that the key was "shared" between zram meta table
> > > entries because the key is per-zram device, which sort of made sense
> > > (we can have different zram devices in a system - one swap, a bunch
> > > mounted with various file-systems on them).
>
> Yes. So usually you do spin_lock_init() and this creates a key at _this_
> very position. So every lock initialized at this position shares the
> same class / the same pattern.
>
> > So the lock class is registered dynamically for each zram device
> >
> > zram_add()
> > 	lockdep_register_key(&zram->lock_class);
> >
> > and then we use that zram->lock_class to init zram->table entries.
> >
> > We unregister the lock_class during each zram device destruction
> >
> > zram_remove()
> > 	lockdep_unregister_key(&zram->lock_class);
> >
> > Does this still put zram->table entries into different lock classes?
>
> You shouldn't need to register and unregister the lock_class. What you
> do should match for instance j_trans_commit_map in fs/jbd2/journal.c or
> __key in include/linux/rhashtable.h & lib/rhashtable.c.

I see, thank you. Let me try static keys then (in zram and in zsmalloc).
Will need a day or two to re-run the tests, and then will send out an
updated series.