Replace kmalloc(size * sizeof) with kmalloc_array() for safer memory
allocation and overflow prevention.
Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
---
 fs/bcachefs/btree_key_cache.c    | 2 +-
 fs/bcachefs/btree_trans_commit.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 4890cbc88e7c..8dd70024e513 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -136,7 +136,7 @@ static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
 	struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
 	if (unlikely(!ck))
 		return NULL;
-	ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
+	ck->k = kmalloc_array(key_u64s, sizeof(u64), gfp);
 	if (unlikely(!ck->k)) {
 		kmem_cache_free(bch2_key_cache, ck);
 		return NULL;
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 4d58bdb233e9..4102a3cb2410 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -396,7 +396,7 @@ btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
 	bch2_trans_unlock_updates_write(trans);
 	bch2_trans_unlock(trans);
 
-	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
+	new_k = kmalloc_array(new_u64s, sizeof(u64), GFP_KERNEL);
 	if (!new_k) {
 		struct bch_fs *c = trans->c;
 		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
--
2.34.1
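
For context on the claimed benefit: kmalloc_array() is kmalloc() plus a check that the element-count multiplication cannot wrap. A minimal sketch of the pattern, loosely modeled on the helper in include/linux/slab.h (simplified, not the exact kernel definition):

#include <linux/overflow.h>
#include <linux/slab.h>

/*
 * Minimal sketch of the kmalloc_array() pattern, loosely modeled on
 * include/linux/slab.h; the real helper carries extra annotations.
 * If n * size would overflow size_t, fail the allocation outright
 * instead of silently handing back a truncated buffer.
 */
static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kmalloc(bytes, flags);
}

Whether that check buys anything depends on whether the count is genuinely an element count that can grow large, which is the crux of the review below.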
On Aug 11, 2025, at 20:09, Liao Yuanhong <liaoyuanhong@vivo.com> wrote:
>
> Replace kmalloc(size * sizeof) with kmalloc_array() for safer memory
> allocation and overflow prevention.
>
> Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
> ---
[...]
> diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
> index 4d58bdb233e9..4102a3cb2410 100644
> --- a/fs/bcachefs/btree_trans_commit.c
> +++ b/fs/bcachefs/btree_trans_commit.c
> @@ -396,7 +396,7 @@ btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
> 	bch2_trans_unlock_updates_write(trans);
> 	bch2_trans_unlock(trans);
>
> -	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
> +	new_k = kmalloc_array(new_u64s, sizeof(u64), GFP_KERNEL);

No, it’s not an array.
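
To make the objection concrete: both call sites allocate a single variable-length key whose size is measured in u64 words; it is not an array of u64 elements, and bcachefs counts key lengths in a u8 field, so the multiplication cannot overflow in practice. A hypothetical, simplified sketch (demo_key and demo_key_alloc are illustrative names, not the real definitions from fs/bcachefs/bcachefs_format.h):

#include <linux/slab.h>
#include <linux/types.h>

/*
 * Hypothetical, simplified stand-in for a bcachefs key; the real
 * layout lives in fs/bcachefs/bcachefs_format.h.  The point is the
 * shape: one variable-length object whose length is counted in u64
 * words, not an array of u64 elements.
 */
struct demo_key {
	u8	u64s;		/* total size in u64 words, so at most 255 */
	u8	type;
	u64	payload[];	/* variable-length body of the key */
};

static struct demo_key *demo_key_alloc(unsigned key_u64s, gfp_t gfp)
{
	/*
	 * One object of key_u64s * 8 bytes.  kmalloc_array() would
	 * suggest key_u64s independent elements, and with the length
	 * bounded by a u8 the multiply cannot wrap anyway, so plain
	 * kmalloc() is the clearer spelling.
	 */
	return kmalloc(key_u64s * sizeof(u64), gfp);
}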
On 8/11/2025 8:50 PM, Alan Huang wrote:
> On Aug 11, 2025, at 20:09, Liao Yuanhong <liaoyuanhong@vivo.com> wrote:
>> Replace kmalloc(size * sizeof) with kmalloc_array() for safer memory
>> allocation and overflow prevention.
>>
[...]
>> -	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
>> +	new_k = kmalloc_array(new_u64s, sizeof(u64), GFP_KERNEL);
> No, it’s not an array.

Yes, there is a problem with this patch. Please ignore it.

Thanks,
Liao