drivers/net/ppp/pppoe.c | 104 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 103 insertions(+), 1 deletion(-)
Only handles packets where the pppoe header length field matches the exact
packet length. Significantly improves rx throughput.
When running NAT traffic through a MediaTek MT7621 device from a host
behind PPPoE to a host directly connected via ethernet, the TCP throughput
that the device is able to handle improves from ~130 Mbit/s to ~630 Mbit/s,
using fraglist GRO.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
v2: fix compile error
drivers/net/ppp/pppoe.c | 104 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 103 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 410effa42ade..5d35eafd06df 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -77,6 +77,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
+#include <net/gro.h>
#include <linux/uaccess.h>
@@ -435,7 +436,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb->len < len)
goto drop;
- if (pskb_trim_rcsum(skb, len))
+ if (!skb_is_gso(skb) && pskb_trim_rcsum(skb, len))
goto drop;
ph = pppoe_hdr(skb);
@@ -1173,6 +1174,105 @@ static struct pernet_operations pppoe_net_ops = {
.size = sizeof(struct pppoe_net),
};
+static u16
+compare_pppoe_header(struct pppoe_hdr *phdr, struct pppoe_hdr *phdr2)
+{
+ return (__force __u16)((phdr->sid ^ phdr2->sid) |
+ (phdr->tag[0].tag_type ^ phdr2->tag[0].tag_type));
+}
+
+static __be16 pppoe_hdr_proto(struct pppoe_hdr *phdr)
+{
+ switch (phdr->tag[0].tag_type) {
+ case cpu_to_be16(PPP_IP):
+ return cpu_to_be16(ETH_P_IP);
+ case cpu_to_be16(PPP_IPV6):
+ return cpu_to_be16(ETH_P_IPV6);
+ default:
+ return 0;
+ }
+
+}
+
+static struct sk_buff *pppoe_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
+{
+ const struct packet_offload *ptype;
+ unsigned int hlen, off_pppoe;
+ struct sk_buff *pp = NULL;
+ struct pppoe_hdr *phdr;
+ struct sk_buff *p;
+ __be16 type;
+ int flush = 1;
+
+ off_pppoe = skb_gro_offset(skb);
+ hlen = off_pppoe + sizeof(*phdr) + 2;
+ phdr = skb_gro_header(skb, hlen, off_pppoe);
+ if (unlikely(!phdr))
+ goto out;
+
+ /* ignore packets with padding or invalid length */
+ if (skb_gro_len(skb) != be16_to_cpu(phdr->length) + hlen - 2)
+ goto out;
+
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
+
+ type = pppoe_hdr_proto(phdr);
+ if (!type)
+ goto out;
+
+ ptype = gro_find_receive_by_type(type);
+ if (!ptype)
+ goto out;
+
+ flush = 0;
+
+ list_for_each_entry(p, head, list) {
+ struct pppoe_hdr *phdr2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ phdr2 = (struct pppoe_hdr *)(p->data + off_pppoe);
+ if (compare_pppoe_header(phdr, phdr2))
+ NAPI_GRO_CB(p)->same_flow = 0;
+ }
+
+ skb_gro_pull(skb, sizeof(*phdr) + 2);
+ skb_gro_postpull_rcsum(skb, phdr, sizeof(*phdr) + 2);
+
+ pp = ptype->callbacks.gro_receive(head, skb);
+
+out:
+ skb_gro_flush_final(skb, pp, flush);
+
+ return pp;
+}
+
+static int pppoe_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct pppoe_hdr *phdr = (struct pppoe_hdr *)(skb->data + nhoff);
+ __be16 type = pppoe_hdr_proto(phdr);
+ struct packet_offload *ptype;
+ int err = -ENOENT;
+
+ ptype = gro_find_complete_by_type(type);
+ if (ptype)
+ err = ptype->callbacks.gro_complete(skb, nhoff +
+ sizeof(*phdr) + 2);
+
+ return err;
+}
+
+static struct packet_offload pppoe_packet_offload __read_mostly = {
+ .type = cpu_to_be16(ETH_P_PPP_SES),
+ .priority = 10,
+ .callbacks = {
+ .gro_receive = pppoe_gro_receive,
+ .gro_complete = pppoe_gro_complete,
+ },
+};
+
static int __init pppoe_init(void)
{
int err;
@@ -1189,6 +1289,7 @@ static int __init pppoe_init(void)
if (err)
goto out_unregister_pppoe_proto;
+ dev_add_offload(&pppoe_packet_offload);
dev_add_pack(&pppoes_ptype);
dev_add_pack(&pppoed_ptype);
register_netdevice_notifier(&pppoe_notifier);
@@ -1208,6 +1309,7 @@ static void __exit pppoe_exit(void)
unregister_netdevice_notifier(&pppoe_notifier);
dev_remove_pack(&pppoed_ptype);
dev_remove_pack(&pppoes_ptype);
+ dev_remove_offload(&pppoe_packet_offload);
unregister_pppox_proto(PX_PROTO_OE);
proto_unregister(&pppoe_sk_proto);
unregister_pernet_device(&pppoe_net_ops);
--
2.50.1
Felix Fietkau wrote:
> Only handles packets where the pppoe header length field matches the exact
> packet length. Significantly improves rx throughput.
>
> When running NAT traffic through a MediaTek MT7621 devices from a host
> behind PPPoE to a host directly connected via ethernet, the TCP throughput
> that the device is able to handle improves from ~130 Mbit/s to ~630 Mbit/s,
> using fraglist GRO.
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
> v2: fix compile error
>
> drivers/net/ppp/pppoe.c | 104 +++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 103 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
> index 410effa42ade..5d35eafd06df 100644
> --- a/drivers/net/ppp/pppoe.c
> +++ b/drivers/net/ppp/pppoe.c
> @@ -77,6 +77,7 @@
> #include <net/net_namespace.h>
> #include <net/netns/generic.h>
> #include <net/sock.h>
> +#include <net/gro.h>
>
> #include <linux/uaccess.h>
>
> @@ -435,7 +436,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
> if (skb->len < len)
> goto drop;
>
> - if (pskb_trim_rcsum(skb, len))
> + if (!skb_is_gso(skb) && pskb_trim_rcsum(skb, len))
> goto drop;
>
> ph = pppoe_hdr(skb);
> @@ -1173,6 +1174,105 @@ static struct pernet_operations pppoe_net_ops = {
> .size = sizeof(struct pppoe_net),
> };
>
> +static u16
> +compare_pppoe_header(struct pppoe_hdr *phdr, struct pppoe_hdr *phdr2)
> +{
> + return (__force __u16)((phdr->sid ^ phdr2->sid) |
> + (phdr->tag[0].tag_type ^ phdr2->tag[0].tag_type));
> +}
> +
> +static __be16 pppoe_hdr_proto(struct pppoe_hdr *phdr)
> +{
> + switch (phdr->tag[0].tag_type) {
> + case cpu_to_be16(PPP_IP):
> + return cpu_to_be16(ETH_P_IP);
> + case cpu_to_be16(PPP_IPV6):
> + return cpu_to_be16(ETH_P_IPV6);
> + default:
> + return 0;
> + }
> +
> +}
> +
> +static struct sk_buff *pppoe_gro_receive(struct list_head *head,
> + struct sk_buff *skb)
> +{
> + const struct packet_offload *ptype;
> + unsigned int hlen, off_pppoe;
> + struct sk_buff *pp = NULL;
> + struct pppoe_hdr *phdr;
> + struct sk_buff *p;
> + __be16 type;
> + int flush = 1;
> +
> + off_pppoe = skb_gro_offset(skb);
> + hlen = off_pppoe + sizeof(*phdr) + 2;
> + phdr = skb_gro_header(skb, hlen, off_pppoe);
> + if (unlikely(!phdr))
> + goto out;
> +
> + /* ignore packets with padding or invalid length */
> + if (skb_gro_len(skb) != be16_to_cpu(phdr->length) + hlen - 2)
> + goto out;
> +
> + NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
This is not required as {inet,ipv6}_gro_receive already set the network offset.
> +
> + type = pppoe_hdr_proto(phdr);
> + if (!type)
> + goto out;
> +
> + ptype = gro_find_receive_by_type(type);
> + if (!ptype)
> + goto out;
> +
> + flush = 0;
> +
> + list_for_each_entry(p, head, list) {
> + struct pppoe_hdr *phdr2;
> +
> + if (!NAPI_GRO_CB(p)->same_flow)
> + continue;
> +
> + phdr2 = (struct pppoe_hdr *)(p->data + off_pppoe);
> + if (compare_pppoe_header(phdr, phdr2))
> + NAPI_GRO_CB(p)->same_flow = 0;
> + }
> +
> + skb_gro_pull(skb, sizeof(*phdr) + 2);
> + skb_gro_postpull_rcsum(skb, phdr, sizeof(*phdr) + 2);
> +
> + pp = ptype->callbacks.gro_receive(head, skb);
> +
> +out:
> + skb_gro_flush_final(skb, pp, flush);
> +
> + return pp;
> +}
> +
> +static int pppoe_gro_complete(struct sk_buff *skb, int nhoff)
> +{
> + struct pppoe_hdr *phdr = (struct pppoe_hdr *)(skb->data + nhoff);
> + __be16 type = pppoe_hdr_proto(phdr);
> + struct packet_offload *ptype;
> + int err = -ENOENT;
> +
> + ptype = gro_find_complete_by_type(type);
> + if (ptype)
> + err = ptype->callbacks.gro_complete(skb, nhoff +
> + sizeof(*phdr) + 2);
> +
> + return err;
> +}
Shouldn't pppoe_gro_complete fix the length field of the pppoe header?
> +
> +static struct packet_offload pppoe_packet_offload __read_mostly = {
> + .type = cpu_to_be16(ETH_P_PPP_SES),
> + .priority = 10,
> + .callbacks = {
> + .gro_receive = pppoe_gro_receive,
> + .gro_complete = pppoe_gro_complete,
> + },
> +};
> +
> static int __init pppoe_init(void)
> {
> int err;
> @@ -1189,6 +1289,7 @@ static int __init pppoe_init(void)
> if (err)
> goto out_unregister_pppoe_proto;
>
> + dev_add_offload(&pppoe_packet_offload);
> dev_add_pack(&pppoes_ptype);
> dev_add_pack(&pppoed_ptype);
> register_netdevice_notifier(&pppoe_notifier);
> @@ -1208,6 +1309,7 @@ static void __exit pppoe_exit(void)
> unregister_netdevice_notifier(&pppoe_notifier);
> dev_remove_pack(&pppoed_ptype);
> dev_remove_pack(&pppoes_ptype);
> + dev_remove_offload(&pppoe_packet_offload);
> unregister_pppox_proto(PX_PROTO_OE);
> proto_unregister(&pppoe_sk_proto);
> unregister_pernet_device(&pppoe_net_ops);
On Wed, Jul 16, 2025 at 1:14 AM Felix Fietkau <nbd@nbd.name> wrote: > > Only handles packets where the pppoe header length field matches the exact > packet length. Significantly improves rx throughput. > > When running NAT traffic through a MediaTek MT7621 devices from a host > behind PPPoE to a host directly connected via ethernet, the TCP throughput > that the device is able to handle improves from ~130 Mbit/s to ~630 Mbit/s, > using fraglist GRO. > > Signed-off-by: Felix Fietkau <nbd@nbd.name> > --- Shouldn't we first add GSO support ? Otherwise forwarding will break ?
On 7/22/25 4:04 PM, Eric Dumazet wrote: > On Wed, Jul 16, 2025 at 1:14 AM Felix Fietkau <nbd@nbd.name> wrote: >> >> Only handles packets where the pppoe header length field matches the exact >> packet length. Significantly improves rx throughput. >> >> When running NAT traffic through a MediaTek MT7621 devices from a host >> behind PPPoE to a host directly connected via ethernet, the TCP throughput >> that the device is able to handle improves from ~130 Mbit/s to ~630 Mbit/s, >> using fraglist GRO. >> >> Signed-off-by: Felix Fietkau <nbd@nbd.name> >> --- > > Shouldn't we first add GSO support ? I *think* the current __skb_gso_segment() should be able to segment a pppoe GSO packet, as the pppoe header is static/constant, skb->mac_len will include both eth/pppoe and skb->protocol should be the actual L3. /P
On 22.07.25 16:47, Paolo Abeni wrote: > On 7/22/25 4:04 PM, Eric Dumazet wrote: >> On Wed, Jul 16, 2025 at 1:14 AM Felix Fietkau <nbd@nbd.name> wrote: >>> >>> Only handles packets where the pppoe header length field matches the exact >>> packet length. Significantly improves rx throughput. >>> >>> When running NAT traffic through a MediaTek MT7621 devices from a host >>> behind PPPoE to a host directly connected via ethernet, the TCP throughput >>> that the device is able to handle improves from ~130 Mbit/s to ~630 Mbit/s, >>> using fraglist GRO. >>> >>> Signed-off-by: Felix Fietkau <nbd@nbd.name> >>> --- >> >> Shouldn't we first add GSO support ? > > I *think* the current __skb_gso_segment() should be able to segment a > pppoe GSO packet, as the pppoe header is static/constant, skb->mac_len > will include both eth/pppoe and skb->protocol should be the actual L3. If the packet is received on a ppp device, __skb_gso_segment works afterwards. However, for the bridge forwarding case, Eric is correct. In this case, skb->protocol will not be the L3 protocol. I will add GSO support in v3. Thanks, - Felix
On 7/16/25 10:14 AM, Felix Fietkau wrote:
> +static struct sk_buff *pppoe_gro_receive(struct list_head *head,
> + struct sk_buff *skb)
> +{
> + const struct packet_offload *ptype;
> + unsigned int hlen, off_pppoe;
> + struct sk_buff *pp = NULL;
> + struct pppoe_hdr *phdr;
> + struct sk_buff *p;
> + __be16 type;
> + int flush = 1;
Minor nit: please respect the reverse christmas tree order above
> + off_pppoe = skb_gro_offset(skb);
> + hlen = off_pppoe + sizeof(*phdr) + 2;
> + phdr = skb_gro_header(skb, hlen, off_pppoe);
> + if (unlikely(!phdr))
> + goto out;
> +
> + /* ignore packets with padding or invalid length */
> + if (skb_gro_len(skb) != be16_to_cpu(phdr->length) + hlen - 2)
> + goto out;
> +
> + NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
> +
> + type = pppoe_hdr_proto(phdr);
> + if (!type)
> + goto out;
> +
> + ptype = gro_find_receive_by_type(type);
> + if (!ptype)
> + goto out;
> +
> + flush = 0;
> +
> + list_for_each_entry(p, head, list) {
> + struct pppoe_hdr *phdr2;
> +
> + if (!NAPI_GRO_CB(p)->same_flow)
> + continue;
> +
> + phdr2 = (struct pppoe_hdr *)(p->data + off_pppoe);
> + if (compare_pppoe_header(phdr, phdr2))
> + NAPI_GRO_CB(p)->same_flow = 0;
> + }
> +
> + skb_gro_pull(skb, sizeof(*phdr) + 2);
> + skb_gro_postpull_rcsum(skb, phdr, sizeof(*phdr) + 2);
> +
> + pp = ptype->callbacks.gro_receive(head, skb);
Here you can use INDIRECT_CALL_INET()
> +
> +out:
> + skb_gro_flush_final(skb, pp, flush);
> +
> + return pp;
> +}
> +
> +static int pppoe_gro_complete(struct sk_buff *skb, int nhoff)
> +{
> + struct pppoe_hdr *phdr = (struct pppoe_hdr *)(skb->data + nhoff);
> + __be16 type = pppoe_hdr_proto(phdr);
> + struct packet_offload *ptype;
> + int err = -ENOENT;
> +
> + ptype = gro_find_complete_by_type(type);
> + if (ptype)
> + err = ptype->callbacks.gro_complete(skb, nhoff +
> + sizeof(*phdr) + 2);
Possibly even here but it's less relevant.
> +
> + return err;
> +}
> +
> +static struct packet_offload pppoe_packet_offload __read_mostly = {
> + .type = cpu_to_be16(ETH_P_PPP_SES),
> + .priority = 10,
The priority value should be IMHO greater than the existing ones to avoid
possible regressions on other protocols. i.e. 20 should do.
Thanks,
Paolo
On 22.07.25 10:36, Paolo Abeni wrote:
> On 7/16/25 10:14 AM, Felix Fietkau wrote:
>> +static struct sk_buff *pppoe_gro_receive(struct list_head *head,
>> + struct sk_buff *skb)
>> +{
>> + const struct packet_offload *ptype;
>> + unsigned int hlen, off_pppoe;
>> + struct sk_buff *pp = NULL;
>> + struct pppoe_hdr *phdr;
>> + struct sk_buff *p;
>> + __be16 type;
>> + int flush = 1;
>
> Minor nit: please respect the reverse christmas tree order above
Will do
>> + off_pppoe = skb_gro_offset(skb);
>> + hlen = off_pppoe + sizeof(*phdr) + 2;
>> + phdr = skb_gro_header(skb, hlen, off_pppoe);
>> + if (unlikely(!phdr))
>> + goto out;
>> +
>> + /* ignore packets with padding or invalid length */
>> + if (skb_gro_len(skb) != be16_to_cpu(phdr->length) + hlen - 2)
>> + goto out;
>> +
>> + NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
>> +
>> + type = pppoe_hdr_proto(phdr);
>> + if (!type)
>> + goto out;
>> +
>> + ptype = gro_find_receive_by_type(type);
>> + if (!ptype)
>> + goto out;
>> +
>> + flush = 0;
>> +
>> + list_for_each_entry(p, head, list) {
>> + struct pppoe_hdr *phdr2;
>> +
>> + if (!NAPI_GRO_CB(p)->same_flow)
>> + continue;
>> +
>> + phdr2 = (struct pppoe_hdr *)(p->data + off_pppoe);
>> + if (compare_pppoe_header(phdr, phdr2))
>> + NAPI_GRO_CB(p)->same_flow = 0;
>> + }
>> +
>> + skb_gro_pull(skb, sizeof(*phdr) + 2);
>> + skb_gro_postpull_rcsum(skb, phdr, sizeof(*phdr) + 2);
>> +
>> + pp = ptype->callbacks.gro_receive(head, skb);
>
> Here you can use INDIRECT_CALL_INET()
I did that in the initial version, but then I got reports of build
failures with the patch:
ERROR: modpost: "inet_gro_receive" [drivers/net/ppp/pppoe.ko] undefined!
ERROR: modpost: "inet_gro_complete" [drivers/net/ppp/pppoe.ko] undefined!
Should I leave it out, or export inet_gro_receive/complete?
>> +
>> +out:
>> + skb_gro_flush_final(skb, pp, flush);
>> +
>> + return pp;
>> +}
>> +
>> +static int pppoe_gro_complete(struct sk_buff *skb, int nhoff)
>> +{
>> + struct pppoe_hdr *phdr = (struct pppoe_hdr *)(skb->data + nhoff);
>> + __be16 type = pppoe_hdr_proto(phdr);
>> + struct packet_offload *ptype;
>> + int err = -ENOENT;
>> +
>> + ptype = gro_find_complete_by_type(type);
>> + if (ptype)
>> + err = ptype->callbacks.gro_complete(skb, nhoff +
>> + sizeof(*phdr) + 2);
>
> Possibly even here but it's less relevant.
>
>> +
>> + return err;
>> +}
>> +
>> +static struct packet_offload pppoe_packet_offload __read_mostly = {
>> + .type = cpu_to_be16(ETH_P_PPP_SES),
>> + .priority = 10,
>
> The priority value should be IMHO greater then the exiting ones to avoid
> possible regressions on other protocols. i.e. 20 should do.
I'll fix it in v3.
Thanks,
- Felix
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 22 Jul 2025 10:56:10 +0200
> On 22.07.25 10:36, Paolo Abeni wrote:
>> On 7/16/25 10:14 AM, Felix Fietkau wrote:
>>> +static struct sk_buff *pppoe_gro_receive(struct list_head *head,
>>> + struct sk_buff *skb)
>>> +{
>>> + const struct packet_offload *ptype;
>>> + unsigned int hlen, off_pppoe;
>>> + struct sk_buff *pp = NULL;
>>> + struct pppoe_hdr *phdr;
>>> + struct sk_buff *p;
>>> + __be16 type;
>>> + int flush = 1;
>>
>> Minor nit: please respect the reverse christmas tree order above
>
> Will do
>
>>> + off_pppoe = skb_gro_offset(skb);
>>> + hlen = off_pppoe + sizeof(*phdr) + 2;
>>> + phdr = skb_gro_header(skb, hlen, off_pppoe);
>>> + if (unlikely(!phdr))
>>> + goto out;
>>> +
>>> + /* ignore packets with padding or invalid length */
>>> + if (skb_gro_len(skb) != be16_to_cpu(phdr->length) + hlen - 2)
>>> + goto out;
>>> +
>>> + NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark]
>>> = hlen;
>>> +
>>> + type = pppoe_hdr_proto(phdr);
>>> + if (!type)
>>> + goto out;
>>> +
>>> + ptype = gro_find_receive_by_type(type);
>>> + if (!ptype)
>>> + goto out;
>>> +
>>> + flush = 0;
>>> +
>>> + list_for_each_entry(p, head, list) {
>>> + struct pppoe_hdr *phdr2;
>>> +
>>> + if (!NAPI_GRO_CB(p)->same_flow)
>>> + continue;
>>> +
>>> + phdr2 = (struct pppoe_hdr *)(p->data + off_pppoe);
>>> + if (compare_pppoe_header(phdr, phdr2))
>>> + NAPI_GRO_CB(p)->same_flow = 0;
>>> + }
>>> +
>>> + skb_gro_pull(skb, sizeof(*phdr) + 2);
>>> + skb_gro_postpull_rcsum(skb, phdr, sizeof(*phdr) + 2);
>>> +
>>> + pp = ptype->callbacks.gro_receive(head, skb);
>>
>> Here you can use INDIRECT_CALL_INET()
>
> I did that in the initial version, but then I got reports of build
> failures with the patch:
>
> ERROR: modpost: "inet_gro_receive" [drivers/net/ppp/pppoe.ko] undefined!
> ERROR: modpost: "inet_gro_complete" [drivers/net/ppp/pppoe.ko] undefined!
>
> Should I leave it out, or export inet_gro_receive/complete?
Could be exported I'd say. This would allow more modules to implement
GRO support.
INDIRECT_CALL() gives good boosts here, at least I got some after
switching VLAN and TEB code several years ago.
If everyone is fine with exporting, I'd say those need to be exported as
GPL-only.
>>> +
>>> +out:
>>> + skb_gro_flush_final(skb, pp, flush);
>>> +
>>> + return pp;
Thanks,
Olek
On 7/25/25 2:42 PM, Alexander Lobakin wrote:
> From: Felix Fietkau <nbd@nbd.name> Date: Tue, 22 Jul 2025 10:56:10 +0200
>> On 22.07.25 10:36, Paolo Abeni wrote:
>>> On 7/16/25 10:14 AM, Felix Fietkau wrote:
>>>> +static struct sk_buff *pppoe_gro_receive(struct list_head *head,
>>>> + struct sk_buff *skb)
>>>> +{
>>>> + const struct packet_offload *ptype;
>>>> + unsigned int hlen, off_pppoe;
>>>> + struct sk_buff *pp = NULL;
>>>> + struct pppoe_hdr *phdr;
>>>> + struct sk_buff *p;
>>>> + __be16 type;
>>>> + int flush = 1;
>>>
>>> Minor nit: please respect the reverse christmas tree order above
>>
>> Will do
>>
>>>> + off_pppoe = skb_gro_offset(skb);
>>>> + hlen = off_pppoe + sizeof(*phdr) + 2;
>>>> + phdr = skb_gro_header(skb, hlen, off_pppoe);
>>>> + if (unlikely(!phdr))
>>>> + goto out;
>>>> +
>>>> + /* ignore packets with padding or invalid length */
>>>> + if (skb_gro_len(skb) != be16_to_cpu(phdr->length) + hlen - 2)
>>>> + goto out;
>>>> +
>>>> + NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark]
>>>> = hlen;
>>>> +
>>>> + type = pppoe_hdr_proto(phdr);
>>>> + if (!type)
>>>> + goto out;
>>>> +
>>>> + ptype = gro_find_receive_by_type(type);
>>>> + if (!ptype)
>>>> + goto out;
>>>> +
>>>> + flush = 0;
>>>> +
>>>> + list_for_each_entry(p, head, list) {
>>>> + struct pppoe_hdr *phdr2;
>>>> +
>>>> + if (!NAPI_GRO_CB(p)->same_flow)
>>>> + continue;
>>>> +
>>>> + phdr2 = (struct pppoe_hdr *)(p->data + off_pppoe);
>>>> + if (compare_pppoe_header(phdr, phdr2))
>>>> + NAPI_GRO_CB(p)->same_flow = 0;
>>>> + }
>>>> +
>>>> + skb_gro_pull(skb, sizeof(*phdr) + 2);
>>>> + skb_gro_postpull_rcsum(skb, phdr, sizeof(*phdr) + 2);
>>>> +
>>>> + pp = ptype->callbacks.gro_receive(head, skb);
>>>
>>> Here you can use INDIRECT_CALL_INET()
>>
>> I did that in the initial version, but then I got reports of build
>> failures with the patch:
>>
>> ERROR: modpost: "inet_gro_receive" [drivers/net/ppp/pppoe.ko] undefined!
>> ERROR: modpost: "inet_gro_complete" [drivers/net/ppp/pppoe.ko] undefined!
>>
>> Should I leave it out, or export inet_gro_receive/complete?
>
> Could be exported I'd say. This would allows more modules to implement
> GRO support.
> INDIRECT_CALL() here gives good boosts here, at least I got some after
> switching VLAN and TEB code several years ago.
>
> If everyone is fine with exporting, I'd say those need to be exported as
> GPL-only.
Thanks Olek, I was almost missing this!
I agree with exporting GPL-only the symbols.
Thanks,
Paolo
© 2016 - 2026 Red Hat, Inc.