A socket using sockmap has its own independent receive queue: ingress_msg.
This queue may contain data from its own protocol stack or from other
sockets.
The issue is that when reading from ingress_msg, we update tp->copied_seq
by default. However, if the data is not from its own protocol stack,
tp->rcv_nxt is not increased. Later, if we convert this socket to a
native socket, reading from this socket may fail because copied_seq might
be significantly larger than rcv_nxt.
This fix also addresses the syzkaller-reported bug referenced in the
Closes tag.
This patch marks the skmsg objects in ingress_msg. When reading, we update
copied_seq only if the data is from its own protocol stack.
FD1:read()
-- FD1->copied_seq++
| [read data]
|
[enqueue data] v
[sockmap] -> ingress to self -> ingress_msg queue
FD1 native stack ------> ^
-- FD1->rcv_nxt++ -> redirect to other | [enqueue data]
| |
| ingress to FD1
v ^
... | [sockmap]
FD2 native stack
Closes: https://syzkaller.appspot.com/bug?extid=06dbd397158ec0ea4983
Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()")
Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
---
include/linux/skmsg.h | 2 ++
net/core/skmsg.c | 28 +++++++++++++++++++++++++---
net/ipv4/tcp_bpf.c | 5 +++--
3 files changed, 30 insertions(+), 5 deletions(-)
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 49847888c287..dfdc158ab88c 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -141,6 +141,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
+int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags, int *copied_from_self);
bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 2ac7731e1e0a..ca22ecdbf192 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -409,22 +409,26 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
-/* Receive sk_msg from psock->ingress_msg to @msg. */
-int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
- int len, int flags)
+int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags, int *copied_from_self)
{
struct iov_iter *iter = &msg->msg_iter;
int peek = flags & MSG_PEEK;
struct sk_msg *msg_rx;
int i, copied = 0;
+ bool from_self;
msg_rx = sk_psock_peek_msg(psock);
+ if (copied_from_self)
+ *copied_from_self = 0;
+
while (copied != len) {
struct scatterlist *sge;
if (unlikely(!msg_rx))
break;
+ from_self = msg_rx->sk == sk;
i = msg_rx->sg.start;
do {
struct page *page;
@@ -443,6 +447,9 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
}
copied += copy;
+ if (from_self && copied_from_self)
+ *copied_from_self += copy;
+
if (likely(!peek)) {
sge->offset += copy;
sge->length -= copy;
@@ -487,6 +494,14 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
out:
return copied;
}
+EXPORT_SYMBOL_GPL(__sk_msg_recvmsg);
+
+/* Receive sk_msg from psock->ingress_msg to @msg. */
+int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ int len, int flags)
+{
+ return __sk_msg_recvmsg(sk, psock, msg, len, flags, NULL);
+}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
bool sk_msg_is_readable(struct sock *sk)
@@ -616,6 +631,12 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
if (unlikely(!msg))
return -EAGAIN;
skb_set_owner_r(skb, sk);
+
+ /* This is used in tcp_bpf_recvmsg_parser() to determine whether the
+ * data originates from the socket's own protocol stack. No need to
+ * refcount sk because msg's lifetime is bound to sk via the ingress_msg.
+ */
+ msg->sk = sk;
err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
if (err < 0)
kfree(msg);
@@ -909,6 +930,7 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
sk_msg_compute_data_pointers(msg);
msg->sk = sk;
ret = bpf_prog_run_pin_on_cpu(prog, msg);
+ msg->sk = NULL;
ret = sk_psock_map_verd(ret, msg->sk_redir);
psock->apply_bytes = msg->apply_bytes;
if (ret == __SK_REDIRECT) {
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index a268e1595b22..5c698fd7fbf8 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -226,6 +226,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
int peek = flags & MSG_PEEK;
struct sk_psock *psock;
struct tcp_sock *tcp;
+ int copied_from_self = 0;
int copied = 0;
u32 seq;
@@ -262,7 +263,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
}
msg_bytes_ready:
- copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
+ copied = __sk_msg_recvmsg(sk, psock, msg, len, flags, &copied_from_self);
/* The typical case for EFAULT is the socket was gracefully
* shutdown with a FIN pkt. So check here the other case is
* some error on copy_page_to_iter which would be unexpected.
@@ -277,7 +278,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
goto out;
}
}
- seq += copied;
+ seq += copied_from_self;
if (!copied) {
long timeo;
int data;
--
2.43.0
On Tue, Jan 13, 2026 at 10:50 AM +08, Jiayuan Chen wrote:
> A socket using sockmap has its own independent receive queue: ingress_msg.
> This queue may contain data from its own protocol stack or from other
> sockets.
>
> The issue is that when reading from ingress_msg, we update tp->copied_seq
> by default. However, if the data is not from its own protocol stack,
> tcp->rcv_nxt is not increased. Later, if we convert this socket to a
> native socket, reading from this socket may fail because copied_seq might
> be significantly larger than rcv_nxt.
>
> This fix also addresses the syzkaller-reported bug referenced in the
> Closes tag.
>
> This patch marks the skmsg objects in ingress_msg. When reading, we update
> copied_seq only if the data is from its own protocol stack.
>
> FD1:read()
> -- FD1->copied_seq++
> | [read data]
> |
> [enqueue data] v
> [sockmap] -> ingress to self -> ingress_msg queue
> FD1 native stack ------> ^
> -- FD1->rcv_nxt++ -> redirect to other | [enqueue data]
> | |
> | ingress to FD1
> v ^
> ... | [sockmap]
> FD2 native stack
>
> Closes: https://syzkaller.appspot.com/bug?extid=06dbd397158ec0ea4983
> Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()")
> Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
> Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
> ---
> include/linux/skmsg.h | 2 ++
> net/core/skmsg.c | 28 +++++++++++++++++++++++++---
> net/ipv4/tcp_bpf.c | 5 +++--
> 3 files changed, 30 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
> index 49847888c287..dfdc158ab88c 100644
> --- a/include/linux/skmsg.h
> +++ b/include/linux/skmsg.h
> @@ -141,6 +141,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
> struct sk_msg *msg, u32 bytes);
> int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> int len, int flags);
> +int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> + int len, int flags, int *copied_from_self);
> bool sk_msg_is_readable(struct sock *sk);
>
> static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
> diff --git a/net/core/skmsg.c b/net/core/skmsg.c
> index 2ac7731e1e0a..ca22ecdbf192 100644
> --- a/net/core/skmsg.c
> +++ b/net/core/skmsg.c
> @@ -409,22 +409,26 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
> }
> EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
>
> -/* Receive sk_msg from psock->ingress_msg to @msg. */
> -int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> - int len, int flags)
> +int __sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> + int len, int flags, int *copied_from_self)
> {
> struct iov_iter *iter = &msg->msg_iter;
> int peek = flags & MSG_PEEK;
> struct sk_msg *msg_rx;
> int i, copied = 0;
> + bool from_self;
>
> msg_rx = sk_psock_peek_msg(psock);
> + if (copied_from_self)
> + *copied_from_self = 0;
> +
> while (copied != len) {
> struct scatterlist *sge;
>
> if (unlikely(!msg_rx))
> break;
>
> + from_self = msg_rx->sk == sk;
> i = msg_rx->sg.start;
> do {
> struct page *page;
> @@ -443,6 +447,9 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> }
>
> copied += copy;
> + if (from_self && copied_from_self)
> + *copied_from_self += copy;
> +
> if (likely(!peek)) {
> sge->offset += copy;
> sge->length -= copy;
> @@ -487,6 +494,14 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> out:
> return copied;
> }
> +EXPORT_SYMBOL_GPL(__sk_msg_recvmsg);
Nit: Sorry, I hadn't caught that before. tcp_bpf is a built-in. We
don't need to export this internal helper to modules.
> +
> +/* Receive sk_msg from psock->ingress_msg to @msg. */
> +int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> + int len, int flags)
> +{
> + return __sk_msg_recvmsg(sk, psock, msg, len, flags, NULL);
> +}
> EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
>
> bool sk_msg_is_readable(struct sock *sk)
> @@ -616,6 +631,12 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
> if (unlikely(!msg))
> return -EAGAIN;
> skb_set_owner_r(skb, sk);
> +
> + /* This is used in tcp_bpf_recvmsg_parser() to determine whether the
> + * data originates from the socket's own protocol stack. No need to
> + * refcount sk because msg's lifetime is bound to sk via the ingress_msg.
> + */
> + msg->sk = sk;
> err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref);
> if (err < 0)
> kfree(msg);
> @@ -909,6 +930,7 @@ int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
> sk_msg_compute_data_pointers(msg);
> msg->sk = sk;
> ret = bpf_prog_run_pin_on_cpu(prog, msg);
> + msg->sk = NULL;
> ret = sk_psock_map_verd(ret, msg->sk_redir);
> psock->apply_bytes = msg->apply_bytes;
> if (ret == __SK_REDIRECT) {
> diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
> index a268e1595b22..5c698fd7fbf8 100644
> --- a/net/ipv4/tcp_bpf.c
> +++ b/net/ipv4/tcp_bpf.c
> @@ -226,6 +226,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
> int peek = flags & MSG_PEEK;
> struct sk_psock *psock;
> struct tcp_sock *tcp;
> + int copied_from_self = 0;
> int copied = 0;
> u32 seq;
>
> @@ -262,7 +263,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
> }
>
> msg_bytes_ready:
> - copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
> + copied = __sk_msg_recvmsg(sk, psock, msg, len, flags, &copied_from_self);
> /* The typical case for EFAULT is the socket was gracefully
> * shutdown with a FIN pkt. So check here the other case is
> * some error on copy_page_to_iter which would be unexpected.
> @@ -277,7 +278,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
> goto out;
> }
> }
> - seq += copied;
> + seq += copied_from_self;
> if (!copied) {
> long timeo;
> int data;
On 2026-01-20 16:01:21, Jakub Sitnicki wrote:
> On Tue, Jan 13, 2026 at 10:50 AM +08, Jiayuan Chen wrote:
> > A socket using sockmap has its own independent receive queue: ingress_msg.
> > This queue may contain data from its own protocol stack or from other
> > sockets.
> >
> > The issue is that when reading from ingress_msg, we update tp->copied_seq
> > by default. However, if the data is not from its own protocol stack,
> > tcp->rcv_nxt is not increased. Later, if we convert this socket to a
> > native socket, reading from this socket may fail because copied_seq might
> > be significantly larger than rcv_nxt.
> >
> > This fix also addresses the syzkaller-reported bug referenced in the
> > Closes tag.
> >
> > This patch marks the skmsg objects in ingress_msg. When reading, we update
> > copied_seq only if the data is from its own protocol stack.
> >
> > FD1:read()
> > -- FD1->copied_seq++
> > | [read data]
> > |
> > [enqueue data] v
> > [sockmap] -> ingress to self -> ingress_msg queue
> > FD1 native stack ------> ^
> > -- FD1->rcv_nxt++ -> redirect to other | [enqueue data]
> > | |
> > | ingress to FD1
> > v ^
> > ... | [sockmap]
> > FD2 native stack
> >
> > Closes: https://syzkaller.appspot.com/bug?extid=06dbd397158ec0ea4983
> > Fixes: 04919bed948dc ("tcp: Introduce tcp_read_skb()")
> > Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
> > Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
[...]
> > @@ -487,6 +494,14 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
> > out:
> > return copied;
> > }
> > +EXPORT_SYMBOL_GPL(__sk_msg_recvmsg);
>
> Nit: Sorry, I haven't caught that before. tcp_bpf is a built-in. We
> don't need to export this internal helper to modules.
We could probably push this without the 2/3 patch? Even if we are still
debating that patch, it would be good to get this one merged.
Reviewed-by: John Fastabend <john.fastabend@gmail.com>
On Tue, Jan 20, 2026 at 07:38 AM -08, John Fastabend wrote:
> We could probably push this without the 2/3 patch? If we are debating
> that patch still would be good to get this merged.

Definitely. But also see my email from today [1]. I think I figured out a
way we can leave FIONREAD in better shape than it is today - with what
Jiayuan proposed - and improve it in the future.

[1] https://msgid.link/871pjjux2u.fsf@cloudflare.com
© 2016 - 2026 Red Hat, Inc.