The number of chunks can overflow u32. Make sure to return -EINVAL on
overflow.
Also remove a redundant u32 cast when assigning umem->npgs.
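
As an illustration only, the minimal user-space sketch below (not the kernel
code path) shows how a UMEM size that passes the existing npgs <= U32_MAX
check can still yield a chunk count above U32_MAX, so truncating it to 32 bits
silently wraps. It assumes a 4096-byte PAGE_SIZE and the 2048-byte minimum
chunk size; the specific size value is just an example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example UMEM of ~8 TiB: the page count fits in a u32, but the chunk count does not. */
	uint64_t size = ((uint64_t)1 << 43) + 2048;
	uint32_t chunk_size = 2048;                   /* minimum chunk size */
	uint64_t npgs = (size + 4095) / 4096;         /* 2^31 + 1 pages, <= U32_MAX */
	uint64_t chunks = size / chunk_size;          /* 2^32 + 1 chunks */
	uint32_t truncated = (uint32_t)chunks;        /* wraps to 1: nonzero, so the old chunks == 0 check misses it */

	printf("npgs=%" PRIu64 " chunks=%" PRIu64 " truncated=%" PRIu32 "\n",
	       npgs, chunks, truncated);
	return 0;
}

Keeping chunks in a u64 and rejecting chunks > U32_MAX, as done in the diff
below, closes this hole.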
Fixes: bbff2f321a86 ("xsk: new descriptor addressing scheme")
Signed-off-by: Kal Conley <kal.conley@dectris.com>
---
net/xdp/xdp_umem.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 4681e8e8ad94..02207e852d79 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -150,10 +150,11 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
 
 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 {
-	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
 	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
-	u64 npgs, addr = mr->addr, size = mr->len;
-	unsigned int chunks, chunks_rem;
+	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+	u64 addr = mr->addr, size = mr->len;
+	u32 chunks_rem, npgs_rem;
+	u64 chunks, npgs;
 	int err;
 
 	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
@@ -188,8 +189,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	if (npgs > U32_MAX)
 		return -EINVAL;
 
-	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
-	if (chunks == 0)
+	chunks = div_u64_rem(size, chunk_size, &chunks_rem);
+	if (!chunks || chunks > U32_MAX)
 		return -EINVAL;
 
 	if (!unaligned_chunks && chunks_rem)
@@ -202,7 +203,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->headroom = headroom;
 	umem->chunk_size = chunk_size;
 	umem->chunks = chunks;
-	umem->npgs = (u32)npgs;
+	umem->npgs = npgs;
 	umem->pgs = NULL;
 	umem->user = NULL;
 	umem->flags = mr->flags;
--
2.39.2
On Wed, 8 Mar 2023 at 18:51, Kal Conley <kal.conley@dectris.com> wrote:
>
> The number of chunks can overflow u32. Make sure to return -EINVAL on
> overflow.
>
> Also remove a redundant u32 cast when assigning umem->npgs.

Thanks!

Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
On 3/16/23 1:52 PM, Magnus Karlsson wrote:
> On Wed, 8 Mar 2023 at 18:51, Kal Conley <kal.conley@dectris.com> wrote:
>>
>> The number of chunks can overflow u32. Make sure to return -EINVAL on
>> overflow.
>>
>> Also remove a redundant u32 cast when assigning umem->npgs.
>
> Thanks!
>
> Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
>
>> Fixes: bbff2f321a86 ("xsk: new descriptor addressing scheme")
>> Signed-off-by: Kal Conley <kal.conley@dectris.com>

Looks like patchbot was on partial strike, this was applied to bpf, thanks!