Since commit 4959aebba8c0 ("virtio-net: use mtu size as buffer length
for big packets"), the allocated size for big packets is not
MAX_SKB_FRAGS * PAGE_SIZE anymore but depends on negotiated MTU. The
number of allocated frags for big packets is stored in
vi->big_packets_num_skbfrags. This commit fixes the received length
check to match that change. The current incorrect check can lead to a
NULL page pointer dereference in the while loop below when an erroneous
length is received.
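
For reference, the corrected bound simply mirrors the buffer layout that
add_recvbuf_big() posts to the device (a sketch of the arithmetic, not the
exact driver code): the first page carries the header and then data from
'offset' to the end of the page, and each of the
vi->big_packets_num_skbfrags chained pages can carry a full page of data:

    max_remaining_len = (PAGE_SIZE - offset)
                      + vi->big_packets_num_skbfrags * PAGE_SIZE;

For example, with 4 KiB pages, no guest GSO and a 9000-byte MTU,
big_packets_num_skbfrags is DIV_ROUND_UP(9000, 4096) = 3, far fewer pages
than the old check assumed, so a device reporting a length close to
MAX_SKB_FRAGS * PAGE_SIZE would make the loop walk past the last
allocated page.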
Fixes: 4959aebba8c0 ("virtio-net: use mtu size as buffer length for big packets")
Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
---
Changes in v3:
- Convert BUG_ON to WARN_ON_ONCE
Changes in v2:
- Remove incorrect give_pages call
---
drivers/net/virtio_net.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index a757cbcab87f..e7b33e40ea99 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -852,7 +852,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
{
struct sk_buff *skb;
struct virtio_net_common_hdr *hdr;
- unsigned int copy, hdr_len, hdr_padded_len;
+ unsigned int copy, hdr_len, hdr_padded_len, max_remaining_len;
struct page *page_to_free = NULL;
int tailroom, shinfo_size;
char *p, *hdr_p, *buf;
@@ -916,12 +916,16 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
* tries to receive more than is possible. This is usually
* the case of a broken device.
*/
- if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
+ if (WARN_ON_ONCE(offset >= PAGE_SIZE))
+ goto err;
+
+ max_remaining_len = (unsigned int)PAGE_SIZE - offset;
+ max_remaining_len += vi->big_packets_num_skbfrags * PAGE_SIZE;
+ if (unlikely(len > max_remaining_len)) {
net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
- dev_kfree_skb(skb);
- return NULL;
+ goto err;
}
- BUG_ON(offset >= PAGE_SIZE);
+
while (len) {
unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
@@ -941,6 +945,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
put_page(page_to_free);
return skb;
+
+err:
+ dev_kfree_skb(skb);
+ return NULL;
}
static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
--
2.43.0
On Tue, Oct 21, 2025 at 11:45 PM Bui Quang Minh
<minhquangbui99@gmail.com> wrote:
>
> Since commit 4959aebba8c0 ("virtio-net: use mtu size as buffer length
> for big packets"), the allocated size for big packets is not
> MAX_SKB_FRAGS * PAGE_SIZE anymore but depends on negotiated MTU
And guest GSO features.
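
For context, the sizing that commit introduced looks roughly like this
(paraphrased, not a verbatim quote of virtnet_set_big_packets()):

    if (mtu > ETH_DATA_LEN || guest_gso) {
            vi->big_packets = true;
            vi->big_packets_num_skbfrags = guest_gso ?
                    MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
    }

i.e. the number of data pages depends on both the negotiated MTU and on
whether any guest GSO feature was negotiated, so it would be good to
mention both in the changelog.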
>. The
> number of allocated frags for big packets is stored in
> vi->big_packets_num_skbfrags. This commit fixes the received length
> check to match that change. The current incorrect check can lead to a
> NULL page pointer dereference in the while loop below when an erroneous
> length is received.
It might also be helpful to describe how you can reproduce this issue.
>
> Fixes: 4959aebba8c0 ("virtio-net: use mtu size as buffer length for big packets")
Cc: stable@vger.kernel.org
> Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
> ---
> Changes in v3:
> - Convert BUG_ON to WARN_ON_ONCE
> Changes in v2:
> - Remove incorrect give_pages call
> ---
> drivers/net/virtio_net.c | 18 +++++++++++++-----
> 1 file changed, 13 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a757cbcab87f..e7b33e40ea99 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -852,7 +852,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> {
> struct sk_buff *skb;
> struct virtio_net_common_hdr *hdr;
> - unsigned int copy, hdr_len, hdr_padded_len;
> + unsigned int copy, hdr_len, hdr_padded_len, max_remaining_len;
> struct page *page_to_free = NULL;
> int tailroom, shinfo_size;
> char *p, *hdr_p, *buf;
> @@ -916,12 +916,16 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> * tries to receive more than is possible. This is usually
> * the case of a broken device.
> */
> - if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
> + if (WARN_ON_ONCE(offset >= PAGE_SIZE))
> + goto err;
I'd stick with BUG_ON, or at least this conversion needs an independent patch.
> +
> + max_remaining_len = (unsigned int)PAGE_SIZE - offset;
> + max_remaining_len += vi->big_packets_num_skbfrags * PAGE_SIZE;
Let's add a comment to explain the algorithm here, or at least refer the
reader to add_recvbuf_big().
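
Something like the following, just a sketch of the idea (exact wording up
to you):

    /* add_recvbuf_big() posts one page holding the header plus
     * (PAGE_SIZE - offset) bytes of data, followed by
     * vi->big_packets_num_skbfrags full data pages, so a well-behaved
     * device can never hand us more than that from here on.
     */
    max_remaining_len = (unsigned int)PAGE_SIZE - offset;
    max_remaining_len += vi->big_packets_num_skbfrags * PAGE_SIZE;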
> + if (unlikely(len > max_remaining_len)) {
> net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
> - dev_kfree_skb(skb);
> - return NULL;
> + goto err;
It looks like this change is not needed?
> }
> - BUG_ON(offset >= PAGE_SIZE);
> +
Unnecessary changes.
> while (len) {
> unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
> skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
> @@ -941,6 +945,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
> put_page(page_to_free);
>
> return skb;
> +
> +err:
> + dev_kfree_skb(skb);
> + return NULL;
> }
>
> static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
> --
> 2.43.0
>
Thanks