In many places, the bpf_prog pointer is used only for tracing or other
operations that don't modify the structure itself. The same applies to
net_device. Address at least some of them and add `const` qualifiers
there. The object code doesn't change, but this may prevent unwanted
data modifications and also allows more helpers to take const arguments.
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
include/linux/bpf.h | 12 ++++++------
include/linux/filter.h | 9 +++++----
include/linux/netdevice.h | 6 +++---
include/linux/skbuff.h | 2 +-
kernel/bpf/devmap.c | 8 ++++----
net/core/dev.c | 10 +++++-----
net/core/filter.c | 29 ++++++++++++++++-------------
net/core/skbuff.c | 2 +-
8 files changed, 41 insertions(+), 37 deletions(-)
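
To illustrate the kind of read-only usage the commit message refers to,
here is roughly what one of the converted helpers does after the change:
bpf_warn_invalid_xdp_action() only loads fields through the two pointers
for a one-time diagnostic print, so const-qualifying both parameters is
safe. The body below is a sketch paraphrased from net/core/filter.c:

void bpf_warn_invalid_xdp_action(const struct net_device *dev,
				 const struct bpf_prog *prog, u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	/* Only reads prog->aux->name/id and dev->name, never writes */
	pr_warn_once("%s XDP return value %u on prog %s (id %d) dev %s, expect packet loss!\n",
		     act > act_max ? "Illegal" : "Driver unsupported",
		     act, prog->aux->name, prog->aux->id,
		     dev ? dev->name : "N/A");
}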
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 19d8ca8ac960..263515478984 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2534,10 +2534,10 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
- struct bpf_prog *xdp_prog);
+ const struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog, struct bpf_map *map,
- bool exclude_ingress);
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress);
void __cpu_map_flush(struct list_head *flush_list);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
@@ -2801,15 +2801,15 @@ struct sk_buff;
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
struct sk_buff *skb,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
return 0;
}
static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog, struct bpf_map *map,
- bool exclude_ingress)
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress)
{
return 0;
}
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7d7578a8eac1..ee067ab13272 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1178,17 +1178,18 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
* This does not appear to be a real limitation for existing software.
*/
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
- struct xdp_buff *xdp, struct bpf_prog *prog);
+ struct xdp_buff *xdp, const struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
int xdp_do_redirect_frame(struct net_device *dev,
struct xdp_buff *xdp,
struct xdp_frame *xdpf,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
void xdp_do_flush(void);
-void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
+ const struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 8feaca12655e..72f53e7610ec 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3932,9 +3932,9 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog);
-void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb);
+ const struct bpf_prog *xdp_prog);
+void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
+int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
int __netif_rx(struct sk_buff *skb);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f187a2415fb8..c867df5b1051 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3595,7 +3595,7 @@ static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag)
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 9e0e3b0a18e4..f634b87aa0fa 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -675,7 +675,7 @@ int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
}
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
int err;
@@ -698,7 +698,7 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
struct sk_buff *skb,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
struct sk_buff *nskb;
int err;
@@ -717,8 +717,8 @@ static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
}
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
- struct bpf_prog *xdp_prog, struct bpf_map *map,
- bool exclude_ingress)
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress)
{
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
struct bpf_dtab_netdev *dst, *last_dst = NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index c682173a7642..b857abb5c0e9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4927,7 +4927,7 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
}
u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
void *orig_data, *orig_data_end, *hard_start;
struct netdev_rx_queue *rxqueue;
@@ -5029,7 +5029,7 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
}
static int
-netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
+netif_skb_check_for_xdp(struct sk_buff **pskb, const struct bpf_prog *prog)
{
struct sk_buff *skb = *pskb;
int err, hroom, troom;
@@ -5053,7 +5053,7 @@ netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
struct sk_buff *skb = *pskb;
u32 mac_len, act = XDP_DROP;
@@ -5106,7 +5106,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
* and DDOS attacks will be more effective. In-driver-XDP use dedicated TX
* queues, so they do not have this starvation issue.
*/
-void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
+void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -5131,7 +5131,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
+int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb)
{
struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
diff --git a/net/core/filter.c b/net/core/filter.c
index a88e6924c4c0..8dfa9493d2f3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4349,9 +4349,9 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
EXPORT_SYMBOL_GPL(xdp_master_redirect);
static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
- struct net_device *dev,
+ const struct net_device *dev,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
@@ -4372,10 +4372,10 @@ static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
return err;
}
-static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
- struct net_device *dev,
- struct xdp_frame *xdpf,
- struct bpf_prog *xdp_prog)
+static __always_inline int
+__xdp_do_redirect_frame(struct bpf_redirect_info *ri, struct net_device *dev,
+ struct xdp_frame *xdpf,
+ const struct bpf_prog *xdp_prog)
{
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
@@ -4444,7 +4444,7 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
}
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
@@ -4458,7 +4458,8 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
EXPORT_SYMBOL_GPL(xdp_do_redirect);
int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
- struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+ struct xdp_frame *xdpf,
+ const struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
@@ -4473,9 +4474,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, void *fwd,
- enum bpf_map_type map_type, u32 map_id,
- u32 flags)
+ const struct bpf_prog *xdp_prog,
+ void *fwd, enum bpf_map_type map_type,
+ u32 map_id, u32 flags)
{
struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
struct bpf_map *map;
@@ -4529,7 +4530,8 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
}
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
- struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
+ struct xdp_buff *xdp,
+ const struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
enum bpf_map_type map_type = ri->map_type;
@@ -9088,7 +9090,8 @@ static bool xdp_is_valid_access(int off, int size,
return __is_valid_xdp_access(off, size);
}
-void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act)
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
+ const struct bpf_prog *prog, u32 act)
{
const u32 act_max = XDP_REDIRECT;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 00afeb90c23a..224cfe8b4368 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1009,7 +1009,7 @@ int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
EXPORT_SYMBOL(skb_pp_cow_data);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
- struct bpf_prog *prog)
+ const struct bpf_prog *prog)
{
if (!prog->aux->xdp_has_frags)
return -EINVAL;
--
2.46.2
On Tue, Oct 15, 2024 at 04:53:36PM +0200, Alexander Lobakin wrote:
> In many places, the bpf_prog pointer is used only for tracing or other
> operations that don't modify the structure itself. The same applies to
> net_device. Address at least some of them and add `const` qualifiers
> there. The object code doesn't change, but this may prevent unwanted
> data modifications and also allows more helpers to take const arguments.

I believe that this patch is not related to idpf XDP support at all. This
could be pulled out and sent separately, which would reduce the amount of
code jungle that one has to go through in this set ;)

[...]
From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Date: Thu, 17 Oct 2024 13:12:58 +0200

> On Tue, Oct 15, 2024 at 04:53:36PM +0200, Alexander Lobakin wrote:
>> In many places, the bpf_prog pointer is used only for tracing or other
>> operations that don't modify the structure itself. The same applies to
>> net_device. Address at least some of them and add `const` qualifiers
>> there. The object code doesn't change, but this may prevent unwanted
>> data modifications and also allows more helpers to take const arguments.
>
> I believe that this patch is not related to idpf XDP support at all. This
> could be pulled out and sent separately, which would reduce the amount of
> code jungle that one has to go through in this set ;)

First of all, this series is called "core code changes".

Second, without this patch I simply can't introduce libeth_xdp in its
current form: some of its functions pass const pointers, which the old
non-const prototypes here would reject with a compile-time error.

Thanks,
Olek
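
The failure mode described above: bpf_prog_run_xdp() already takes a
const program pointer, so a caller that keeps its program const-qualified
cannot hand it back to the redirect helpers without discarding the
qualifier, which the kernel's -Werror builds turn into a hard error. A
hypothetical caller sketch (libeth_xdp_example() and its body are
illustrative, not taken from the series):

#include <linux/filter.h>
#include <linux/netdevice.h>

static int libeth_xdp_example(struct net_device *dev, struct xdp_buff *xdp,
			      const struct bpf_prog *prog)
{
	u32 act = bpf_prog_run_xdp(prog, xdp); /* already const-aware */

	switch (act) {
	case XDP_PASS:
		return 0;
	case XDP_REDIRECT:
		/*
		 * With the old prototype, this line fails to build:
		 *   passing argument 3 of 'xdp_do_redirect' discards
		 *   'const' qualifier from pointer target type
		 */
		return xdp_do_redirect(dev, xdp, prog);
	default:
		bpf_warn_invalid_xdp_action(dev, prog, act);
		return -EINVAL;
	}
}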
On Mon, Oct 21, 2024 at 03:56:12PM +0200, Alexander Lobakin wrote:
> From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
> Date: Thu, 17 Oct 2024 13:12:58 +0200
>
> [...]
>
> First of all, this series is called "core code changes".
>
> Second, without this patch I simply can't introduce libeth_xdp in its
> current form: some of its functions pass const pointers, which the old
> non-const prototypes here would reject with a compile-time error.

Ok, but that is just related to your vision of libeth. From my POV it
would be more convenient to send such patches earlier, which would reduce
the size of this set. I suppose this patch has been rotting on your
branch for a while and you could have just published it earlier and kept
working on libeth in the meantime.