Here is a patch that linearizes the skb when gso_size is downgraded
and SG should be disabled. If there are no issues, I will submit a
formal patch shortly.
Signed-off-by: Fred Li <dracodingfly@gmail.com>
---
include/linux/skbuff.h | 22 ++++++++++++++++++++++
net/core/filter.c | 16 ++++++++++++----
net/core/skbuff.c | 19 ++-----------------
3 files changed, 36 insertions(+), 21 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5f11f9873341..99b7fc1e826a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2400,6 +2400,28 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb)
return skb->len - skb->data_len;
}
+static inline bool skb_is_nonsg(const struct sk_buff *skb)
+{
+ struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
+ struct sk_buff *check_skb;
+ for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
+ if (skb_headlen(check_skb) && !check_skb->head_frag) {
+ /* gso_size is untrusted, and we have a frag_list with
+ * a linear non head_frag item.
+ *
+ * If head_skb's headlen does not fit requested gso_size,
+ * it means that the frag_list members do NOT terminate
+ * on exact gso_size boundaries. Hence we cannot perform
+ * skb_frag_t page sharing. Therefore we must fallback to
+ * copying the frag_list skbs; we do so by disabling SG.
+ */
+ return true;
+ }
+ }
+
+ return false;
+}
+
static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
unsigned int i, len = 0;
diff --git a/net/core/filter.c b/net/core/filter.c
index df4578219e82..c0e6e7f28635 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3525,13 +3525,21 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* Due to header grow, MSS needs to be downgraded. */
- if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
- skb_decrease_gso_size(shinfo, len_diff);
-
/* Header must be checked, and gso_segs recomputed. */
shinfo->gso_type |= gso_type;
shinfo->gso_segs = 0;
+
+ /* Due to header grow, MSS needs to be downgraded.
+ * There is a BUG_ON when segmenting a frag_list with
+ * head_frag true, so linearize the skb after downgrading
+ * the MSS.
+ */
+ if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
+ skb_decrease_gso_size(shinfo, len_diff);
+ if (skb_is_nonsg(skb))
+ return skb_linearize(skb) ? : 0;
+ }
+
}
return 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b1dab1b071fc..81e018185527 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4458,23 +4458,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
- struct sk_buff *check_skb;
-
- for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
- if (skb_headlen(check_skb) && !check_skb->head_frag) {
- /* gso_size is untrusted, and we have a frag_list with
- * a linear non head_frag item.
- *
- * If head_skb's headlen does not fit requested gso_size,
- * it means that the frag_list members do NOT terminate
- * on exact gso_size boundaries. Hence we cannot perform
- * skb_frag_t page sharing. Therefore we must fallback to
- * copying the frag_list skbs; we do so by disabling SG.
- */
- features &= ~NETIF_F_SG;
- break;
- }
- }
+ if (skb_is_nonsg(head_skb))
+ features &= ~NETIF_F_SG;
}
__skb_push(head_skb, doffset);
--
2.33.0
Fred Li wrote:
> Here is a patch that linearizes the skb when gso_size is downgraded
> and SG should be disabled. If there are no issues, I will submit a
> formal patch shortly.
Target bpf.
Probably does not need quite as many direct CCs.
> Signed-off-by: Fred Li <dracodingfly@gmail.com>
> ---
> include/linux/skbuff.h | 22 ++++++++++++++++++++++
> net/core/filter.c | 16 ++++++++++++----
> net/core/skbuff.c | 19 ++-----------------
> 3 files changed, 36 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index 5f11f9873341..99b7fc1e826a 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -2400,6 +2400,28 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb)
> return skb->len - skb->data_len;
> }
>
> +static inline bool skb_is_nonsg(const struct sk_buff *skb)
> +{
is_nonsg does not cover the functionality, which is fairly subtle.
But maybe we don't need this function at all; see below.
> + struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
> + struct sk_buff *check_skb;
No need for a separate check_skb.
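If the helper is kept, an untested sketch that iterates list_skb directly
(no new names assumed beyond those already in the patch):

	/* untested: reuse list_skb as the cursor instead of check_skb */
	for (; list_skb; list_skb = list_skb->next)
		if (skb_headlen(list_skb) && !list_skb->head_frag)
			return true;

	return false;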
> + for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
> + if (skb_headlen(check_skb) && !check_skb->head_frag) {
> + /* gso_size is untrusted, and we have a frag_list with
> + * a linear non head_frag item.
> + *
> + * If head_skb's headlen does not fit requested gso_size,
> + * it means that the frag_list members do NOT terminate
> + * on exact gso_size boundaries. Hence we cannot perform
> + * skb_frag_t page sharing. Therefore we must fallback to
> + * copying the frag_list skbs; we do so by disabling SG.
> + */
> + return true;
> + }
> + }
> +
> + return false;
> +}
> +
> static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
> {
> unsigned int i, len = 0;
> diff --git a/net/core/filter.c b/net/core/filter.c
> index df4578219e82..c0e6e7f28635 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -3525,13 +3525,21 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
> if (skb_is_gso(skb)) {
> struct skb_shared_info *shinfo = skb_shinfo(skb);
>
> - /* Due to header grow, MSS needs to be downgraded. */
> - if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
> - skb_decrease_gso_size(shinfo, len_diff);
> -
> /* Header must be checked, and gso_segs recomputed. */
> shinfo->gso_type |= gso_type;
> shinfo->gso_segs = 0;
> +
> + /* Due to header grow, MSS needs to be downgraded.
> + * There is a BUG_ON when segmenting a frag_list with
> + * head_frag true, so linearize the skb after downgrading
> + * the MSS.
> + */
> + if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
> + skb_decrease_gso_size(shinfo, len_diff);
> + if (skb_is_nonsg(skb))
> + return skb_linearize(skb) ? : 0;
> + }
> +
No need for ternary statement.
Instead of the complex test in skb_is_nonsg, can we just assume that
alignment will be off whenever the skb has a frag_list and gso_size is
changed? (A rough sketch follows below.)
The same will apply to bpf_skb_net_shrink too.
Not sure that it is okay to linearize inside a BPF helper function.
Hopefully bpf experts can chime in on that.
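A rough, untested sketch of that simpler check in bpf_skb_net_grow(),
assuming skb_has_frag_list() is an acceptable over-approximation of the
misaligned case (and dropping the ternary while at it):

	if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO)) {
		skb_decrease_gso_size(shinfo, len_diff);
		/* assumption: any frag_list is treated as misaligned */
		if (skb_has_frag_list(skb))
			return skb_linearize(skb);
	}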
> }
>
> return 0;
> diff --git a/net/core/skbuff.c b/net/core/skbuff.c
> index b1dab1b071fc..81e018185527 100644
> --- a/net/core/skbuff.c
> +++ b/net/core/skbuff.c
> @@ -4458,23 +4458,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
>
> if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
> mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
> - struct sk_buff *check_skb;
> -
> - for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
> - if (skb_headlen(check_skb) && !check_skb->head_frag) {
> - /* gso_size is untrusted, and we have a frag_list with
> - * a linear non head_frag item.
> - *
> - * If head_skb's headlen does not fit requested gso_size,
> - * it means that the frag_list members do NOT terminate
> - * on exact gso_size boundaries. Hence we cannot perform
> - * skb_frag_t page sharing. Therefore we must fallback to
> - * copying the frag_list skbs; we do so by disabling SG.
> - */
> - features &= ~NETIF_F_SG;
> - break;
> - }
> - }
> + if (skb_is_nonsg(head_skb))
> + features &= ~NETIF_F_SG;
> }
>
> __skb_push(head_skb, doffset);
> --
> 2.33.0
>
Willem de Bruijn wrote:
> No need for ternary statement.
>
> Instead of the complex test in skb_is_nonsg, can we just assume that
> alignment will be off whenever the skb has a frag_list and gso_size is
> changed?
>
> The same will apply to bpf_skb_net_shrink too.

Increasing gso_size may not be a problem, and we can use
BPF_F_ADJ_ROOM_FIXED_GSO to avoid updating gso_size when shrinking
(a minimal sketch follows below).

> Not sure that it is okay to linearize inside a BPF helper function.
> Hopefully bpf experts can chime in on that.

Thanks
Fred Li
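For illustration only, a minimal tc-BPF sketch of shrinking room with
BPF_F_ADJ_ROOM_FIXED_GSO so the helper leaves gso_size untouched (the
section name, encap length and return codes below are assumptions, not
part of this patch):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int strip_encap(struct __sk_buff *skb)
{
	/* Assumed tunnel header size, purely for illustration. */
	const __s32 encap_len = 8;

	/* Shrink room; FIXED_GSO keeps the helper from touching gso_size. */
	if (bpf_skb_adjust_room(skb, -encap_len, BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_FIXED_GSO))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";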
On Tue, Jul 09, 2024 at 11:53:21AM -0400, Willem de Bruijn wrote:
>
> > + /* Due to header grow, MSS needs to be downgraded.
> > + * There is a BUG_ON when segmenting a frag_list with
> > + * head_frag true, so linearize the skb after downgrading
> > + * the MSS.
> > + */

This sounds completely wrong. You should never grow the TCP header
by changing gso_size. What is the usage-scenario for this?

Think about it, if a router forwards a TCP packet, and ends up
growing its TCP header and then splits the packet into two, then
this router is brain-dead.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Herbert Xu wrote:
> On Tue, Jul 09, 2024 at 11:53:21AM -0400, Willem de Bruijn wrote:
> >
> > > + /* Due to header grow, MSS needs to be downgraded.
> > > + * There is a BUG_ON when segmenting a frag_list with
> > > + * head_frag true, so linearize the skb after downgrading
> > > + * the MSS.
> > > + */
>
> This sounds completely wrong. You should never grow the TCP header
> by changing gso_size. What is the usage-scenario for this?
>
> Think about it, if a router forwards a TCP packet, and ends up
> growing its TCP header and then splits the packet into two, then
> this router is brain-dead.

This is an unfortunate feature, but already exists.

It decreases gso_size to account for tunnel headers. For USO, we added
BPF_F_ADJ_ROOM_FIXED_GSO to avoid this in better, newer users.
On Tue, Jul 09, 2024 at 05:29:59PM -0400, Willem de Bruijn wrote:
>
> This is an unfortunate feature, but already exists.
>
> It decreases gso_size to account for tunnel headers.

Growing the tunnel header is totally fine. But you should not decrease
gso_size because of that. Instead the correct course of action is to
drop the packet and generate an ICMP if it no longer fits the MTU.

A router that resegments a TCP packet at the TCP-level (not IP) is
brain-dead.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt