From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Allocate a sheaf and fill it with the requested number of objects. Do not
fill to the sheaf limit, so that incorrectly sized allocation requests can
be detected.
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
tools/include/linux/slab.h | 24 +++++++++++++
tools/testing/shared/linux.c | 84 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 108 insertions(+)
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index d1444e79f2685edb828adbce8b3fbb500c0f8844..1962d7f1abee154e1cda5dba28aef213088dd198 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -23,6 +23,13 @@ enum slab_state {
FULL
};
+/*
+ * Userspace test stand-in for the kernel's slab sheaf: an array of
+ * objects prefilled from @cache.
+ */
+struct slab_sheaf {
+ struct kmem_cache *cache;
+ unsigned int size; /* number of objects currently held in objects[] */
+ unsigned int capacity; /* total slots available in objects[] */
+ void *objects[];
+};
+
struct kmem_cache_args {
unsigned int align;
unsigned int sheaf_capacity;
@@ -80,4 +87,21 @@ void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
void **list);
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+void *
+kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
+
+/* Number of objects currently held in @sheaf. */
+static inline unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf)
+{
+ return sheaf->size;
+}
+
#endif /* _TOOLS_SLAB_H */
diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c
index f998555a1b2af4a899a468a652b04622df459ed3..e0255f53159bd3a1325d49192283dd6790a5e3b8 100644
--- a/tools/testing/shared/linux.c
+++ b/tools/testing/shared/linux.c
@@ -181,6 +181,12 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
if (kmalloc_verbose)
pr_debug("Bulk alloc %zu\n", size);
+ if (cachep->exec_callback) {
+ if (cachep->callback)
+ cachep->callback(cachep->private);
+ cachep->exec_callback = false;
+ }
+
pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs >= size) {
struct radix_tree_node *node;
@@ -270,6 +276,84 @@ __kmem_cache_create_args(const char *name, unsigned int size,
return ret;
}
+/*
+ * Allocate a sheaf and prefill it with @size objects from @s. The sheaf
+ * is sized to max(@size, s->sheaf_capacity) so oversized requests get a
+ * dedicated larger sheaf. Returns NULL if the sheaf or its objects
+ * could not be allocated.
+ */
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size)
+{
+ struct slab_sheaf *sheaf;
+ unsigned int capacity;
+
+ capacity = max(size, s->sheaf_capacity);
+
+ /*
+ * calloc() zeroes the whole allocation. The previous malloc() sized
+ * the array as sheaf_capacity * capacity pointers (a factor of
+ * sheaf_capacity too many), and the memset() that followed zeroed
+ * only @size bytes of it.
+ */
+ sheaf = calloc(1, sizeof(*sheaf) + sizeof(void *) * capacity);
+ if (!sheaf)
+ return NULL;
+
+ sheaf->cache = s;
+ sheaf->capacity = capacity;
+ sheaf->size = kmem_cache_alloc_bulk(s, gfp, size, sheaf->objects);
+ if (!sheaf->size) {
+ free(sheaf);
+ return NULL;
+ }
+
+ return sheaf;
+}
+
+/*
+ * Refill @sheaf so it holds at least @size objects. If @size exceeds the
+ * sheaf's capacity, a larger replacement sheaf is prefilled and returned
+ * via @sheafp while the old one is given back to the cache. Returns 0 on
+ * success, -ENOMEM if no objects could be allocated.
+ */
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size)
+{
+ struct slab_sheaf *sheaf = *sheafp;
+ int refill;
+
+ /* Already holds enough objects; nothing to do. */
+ if (sheaf->size >= size)
+ return 0;
+
+ /* Too small to ever hold @size: swap in a freshly prefilled sheaf. */
+ if (size > sheaf->capacity) {
+ sheaf = kmem_cache_prefill_sheaf(s, gfp, size);
+ if (!sheaf)
+ return -ENOMEM;
+
+ /* Old sheaf's remaining objects go back to the cache. */
+ kmem_cache_return_sheaf(s, gfp, *sheafp);
+ *sheafp = sheaf;
+ return 0;
+ }
+
+ /*
+ * Top up in place with the missing number of objects.
+ * NOTE(review): a partial bulk alloc (0 < refill < requested) still
+ * returns 0 with sheaf->size < size — confirm that is intended.
+ */
+ refill = kmem_cache_alloc_bulk(s, gfp, size - sheaf->size,
+ &sheaf->objects[sheaf->size]);
+ if (!refill)
+ return -ENOMEM;
+
+ sheaf->size += refill;
+ return 0;
+}
+
+/*
+ * Give back an unused sheaf: bulk-free any objects it still holds, then
+ * free the sheaf itself.
+ */
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf)
+{
+ if (sheaf->size)
+ kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]);
+
+ free(sheaf);
+}
+
+/*
+ * Pop one object from @sheaf, or return NULL if it is empty. The vacated
+ * slot is cleared so a stale pointer cannot be handed out twice.
+ */
+void *
+kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf)
+{
+ void *obj;
+
+ if (sheaf->size == 0) {
+ printf("Nothing left in sheaf!\n");
+ return NULL;
+ }
+
+ obj = sheaf->objects[--sheaf->size];
+ sheaf->objects[sheaf->size] = NULL;
+
+ return obj;
+}
+
/*
* Test the test infrastructure for kem_cache_alloc/free and bulk counterparts.
*/
--
2.50.1
On Wed, Jul 23, 2025 at 6:35 AM Vlastimil Babka <vbabka@suse.cz> wrote: > > From: "Liam R. Howlett" <Liam.Howlett@Oracle.com> > > Allocate a sheaf and fill it to the count amount. Does not fill to the > sheaf limit to detect incorrect allocation requests. > > Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com> > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> > --- > tools/include/linux/slab.h | 24 +++++++++++++ > tools/testing/shared/linux.c | 84 ++++++++++++++++++++++++++++++++++++++++++++ > 2 files changed, 108 insertions(+) > > diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h > index d1444e79f2685edb828adbce8b3fbb500c0f8844..1962d7f1abee154e1cda5dba28aef213088dd198 100644 > --- a/tools/include/linux/slab.h > +++ b/tools/include/linux/slab.h > @@ -23,6 +23,13 @@ enum slab_state { > FULL > }; > > +struct slab_sheaf { > + struct kmem_cache *cache; > + unsigned int size; > + unsigned int capacity; > + void *objects[]; > +}; > + > struct kmem_cache_args { > unsigned int align; > unsigned int sheaf_capacity; > @@ -80,4 +87,21 @@ void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list); > int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, > void **list); > > +struct slab_sheaf * > +kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size); > + > +void * > +kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp, > + struct slab_sheaf *sheaf); > + > +void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp, > + struct slab_sheaf *sheaf); > +int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp, > + struct slab_sheaf **sheafp, unsigned int size); > + > +static inline unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf) > +{ > + return sheaf->size; > +} > + > #endif /* _TOOLS_SLAB_H */ > diff --git a/tools/testing/shared/linux.c b/tools/testing/shared/linux.c > index f998555a1b2af4a899a468a652b04622df459ed3..e0255f53159bd3a1325d49192283dd6790a5e3b8 100644 > 
--- a/tools/testing/shared/linux.c > +++ b/tools/testing/shared/linux.c > @@ -181,6 +181,12 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, > if (kmalloc_verbose) > pr_debug("Bulk alloc %zu\n", size); > > + if (cachep->exec_callback) { > + if (cachep->callback) > + cachep->callback(cachep->private); > + cachep->exec_callback = false; > + } > + > pthread_mutex_lock(&cachep->lock); > if (cachep->nr_objs >= size) { > struct radix_tree_node *node; > @@ -270,6 +276,84 @@ __kmem_cache_create_args(const char *name, unsigned int size, > return ret; > } > > +struct slab_sheaf * > +kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size) > +{ > + struct slab_sheaf *sheaf; > + unsigned int capacity; > + > + if (size > s->sheaf_capacity) > + capacity = size; > + else > + capacity = s->sheaf_capacity; nit: capacity = max(size, s->sheaf_capacity); > + > + sheaf = malloc(sizeof(*sheaf) + sizeof(void *) * s->sheaf_capacity * capacity); Should this really be `sizeof(void *) * s->sheaf_capacity * capacity` or just `sizeof(void *) * capacity` ? 
> + if (!sheaf) { > + return NULL; > + } > + > + memset(sheaf, 0, size); > + sheaf->cache = s; > + sheaf->capacity = capacity; > + sheaf->size = kmem_cache_alloc_bulk(s, gfp, size, sheaf->objects); > + if (!sheaf->size) { > + free(sheaf); > + return NULL; > + } > + > + return sheaf; > +} > + > +int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp, > + struct slab_sheaf **sheafp, unsigned int size) > +{ > + struct slab_sheaf *sheaf = *sheafp; > + int refill; > + > + if (sheaf->size >= size) > + return 0; > + > + if (size > sheaf->capacity) { > + sheaf = kmem_cache_prefill_sheaf(s, gfp, size); > + if (!sheaf) > + return -ENOMEM; > + > + kmem_cache_return_sheaf(s, gfp, *sheafp); > + *sheafp = sheaf; > + return 0; > + } > + > + refill = kmem_cache_alloc_bulk(s, gfp, size - sheaf->size, > + &sheaf->objects[sheaf->size]); > + if (!refill) > + return -ENOMEM; > + > + sheaf->size += refill; > + return 0; > +} > + > +void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp, > + struct slab_sheaf *sheaf) > +{ > + if (sheaf->size) { > + //s->non_kernel += sheaf->size; Above comment seems obsolete. > + kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]); > + } > + free(sheaf); > +} > + > +void * > +kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp, > + struct slab_sheaf *sheaf) > +{ > + if (sheaf->size == 0) { > + printf("Nothing left in sheaf!\n"); > + return NULL; > + } > + Should we clear sheaf->objects[sheaf->size] for additional safety? > + return sheaf->objects[--sheaf->size]; > +} > + > /* > * Test the test infrastructure for kem_cache_alloc/free and bulk counterparts. > */ > > -- > 2.50.1 >
On 8/22/25 18:56, Suren Baghdasaryan wrote: >> @@ -270,6 +276,84 @@ __kmem_cache_create_args(const char *name, unsigned int size, >> return ret; >> } >> >> +struct slab_sheaf * >> +kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size) >> +{ >> + struct slab_sheaf *sheaf; >> + unsigned int capacity; >> + >> + if (size > s->sheaf_capacity) >> + capacity = size; >> + else >> + capacity = s->sheaf_capacity; > > nit: > capacity = max(size, s->sheaf_capacity); OK >> + >> + sheaf = malloc(sizeof(*sheaf) + sizeof(void *) * s->sheaf_capacity * capacity); > > Should this really be `sizeof(void *) * s->sheaf_capacity * capacity` > or just `sizeof(void *) * capacity` ? Right, so the whole thing should be: sizeof(*sheaf) + sizeof(void *) * capacity > >> + if (!sheaf) { >> + return NULL; >> + } >> + >> + memset(sheaf, 0, size); This is also wrong, so I'm changing it to calloc(1, ...) to get the zeroing there. >> + sheaf->cache = s; >> + sheaf->capacity = capacity; >> + sheaf->size = kmem_cache_alloc_bulk(s, gfp, size, sheaf->objects); >> + if (!sheaf->size) { >> + free(sheaf); >> + return NULL; >> + } >> + >> + return sheaf; >> +} >> + >> +int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp, >> + struct slab_sheaf **sheafp, unsigned int size) >> +{ >> + struct slab_sheaf *sheaf = *sheafp; >> + int refill; >> + >> + if (sheaf->size >= size) >> + return 0; >> + >> + if (size > sheaf->capacity) { >> + sheaf = kmem_cache_prefill_sheaf(s, gfp, size); >> + if (!sheaf) >> + return -ENOMEM; >> + >> + kmem_cache_return_sheaf(s, gfp, *sheafp); >> + *sheafp = sheaf; >> + return 0; >> + } >> + >> + refill = kmem_cache_alloc_bulk(s, gfp, size - sheaf->size, >> + &sheaf->objects[sheaf->size]); >> + if (!refill) >> + return -ENOMEM; >> + >> + sheaf->size += refill; >> + return 0; >> +} >> + >> +void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp, >> + struct slab_sheaf *sheaf) >> +{ >> + if (sheaf->size) { >> + //s->non_kernel += sheaf->size; > > Above 
comment seems obsolete. Ack. > >> + kmem_cache_free_bulk(s, sheaf->size, &sheaf->objects[0]); >> + } >> + free(sheaf); >> +} >> + >> +void * >> +kmem_cache_alloc_from_sheaf(struct kmem_cache *s, gfp_t gfp, >> + struct slab_sheaf *sheaf) >> +{ >> + if (sheaf->size == 0) { >> + printf("Nothing left in sheaf!\n"); >> + return NULL; >> + } >> + > > Should we clear sheaf->objects[sheaf->size] for additional safety? OK. >> + return sheaf->objects[--sheaf->size]; >> +} >> + >> /* >> * Test the test infrastructure for kem_cache_alloc/free and bulk counterparts. >> */ >> >> -- >> 2.50.1 >>
© 2016 - 2025 Red Hat, Inc.