LLVM generates the bpf_addr_space_cast instruction when translating
pointers between the native (zero) address space and
__attribute__((address_space(N))). The addr_space=1 is reserved as
the bpf_arena address space.

rY = addr_space_cast(rX, 0, 1) is processed by the verifier and
converted to a normal 32-bit move: wY = wX.

rY = addr_space_cast(rX, 1, 0) is used to convert a bpf arena pointer to
a pointer in the userspace vma. This has to be converted by the JIT.

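For example, with the arena conventions used by the BPF selftests
(bpf_arena_common.h defines __arena as __attribute__((address_space(1)))),
a fragment like the sketch below — illustrative only, not part of this
patch — makes LLVM emit addr_space_cast instructions:

	#include <bpf/bpf_helpers.h>	/* SEC(), __uint() */
	/* as in the selftests' bpf_arena_common.h */
	#define __arena __attribute__((address_space(1)))

	struct {
		__uint(type, BPF_MAP_TYPE_ARENA);
		__uint(map_flags, BPF_F_MMAPABLE);
		__uint(max_entries, 10);
	} arena SEC(".maps");

	int __arena *shared_val;	/* pointer into the arena */

	SEC("syscall")
	int set_val(void *ctx)
	{
		/* the compiler inserts addr_space_cast around
		 * accesses through an __arena pointer
		 */
		if (shared_val)
			*shared_val = 42;
		return 0;
	}
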
Signed-off-by: Saket Kumar Bhaskar <skb99@linux.ibm.com>
---
arch/powerpc/net/bpf_jit.h        |  1 +
arch/powerpc/net/bpf_jit_comp.c   |  6 ++++++
arch/powerpc/net/bpf_jit_comp64.c | 11 +++++++++++
3 files changed, 18 insertions(+)

diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 2d095a873305..748e30e8b5b4 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -165,6 +165,7 @@ struct codegen_context {
unsigned int exentry_idx;
unsigned int alt_exit_addr;
u64 arena_vm_start;
+ u64 user_vm_start;
};

#define bpf_to_ppc(r) (ctx->b2p[r])
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 35bfdf4d8785..2b3f90930c27 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -205,6 +205,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
/* Make sure that the stack is quadword aligned. */
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
+ cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);

/* Scouting faux-generate pass 0 */
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -441,6 +442,11 @@ bool bpf_jit_supports_kfunc_call(void)
return true;
}

+bool bpf_jit_supports_arena(void)
+{
+ return IS_ENABLED(CONFIG_PPC64);
+}
+
bool bpf_jit_supports_far_kfunc_call(void)
{
return IS_ENABLED(CONFIG_PPC64);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 16e62766c757..d4fe4dacf2d6 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -812,6 +812,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
*/
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+
+ if (insn_is_cast_user(&insn[i])) {
+ EMIT(PPC_RAW_RLDICL(tmp1_reg, src_reg, 0, 32));
+ PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
+ EMIT(PPC_RAW_CMPDI(tmp1_reg, 0));
+ PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
+ break;
+ }
+
if (imm == 1) {
/* special mov32 for zext */
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
--
2.43.5

On 05/08/2025 at 08:27, Saket Kumar Bhaskar wrote:
> LLVM generates the bpf_addr_space_cast instruction when translating
> pointers between the native (zero) address space and
> __attribute__((address_space(N))). The addr_space=1 is reserved as
> the bpf_arena address space.
>
> rY = addr_space_cast(rX, 0, 1) is processed by the verifier and
> converted to a normal 32-bit move: wY = wX.
>
> rY = addr_space_cast(rX, 1, 0) is used to convert a bpf arena pointer to
> a pointer in the userspace vma. This has to be converted by the JIT.
>
> Signed-off-by: Saket Kumar Bhaskar <skb99@linux.ibm.com>
> ---
> arch/powerpc/net/bpf_jit.h        |  1 +
> arch/powerpc/net/bpf_jit_comp.c   |  6 ++++++
> arch/powerpc/net/bpf_jit_comp64.c | 11 +++++++++++
> 3 files changed, 18 insertions(+)
>
> diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
> index 2d095a873305..748e30e8b5b4 100644
> --- a/arch/powerpc/net/bpf_jit.h
> +++ b/arch/powerpc/net/bpf_jit.h
> @@ -165,6 +165,7 @@ struct codegen_context {
> unsigned int exentry_idx;
> unsigned int alt_exit_addr;
> u64 arena_vm_start;
> + u64 user_vm_start;
> };
>
> #define bpf_to_ppc(r) (ctx->b2p[r])
> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> index 35bfdf4d8785..2b3f90930c27 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -205,6 +205,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
> /* Make sure that the stack is quadword aligned. */
> cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
> cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
> + cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
>
> /* Scouting faux-generate pass 0 */
> if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
> @@ -441,6 +442,11 @@ bool bpf_jit_supports_kfunc_call(void)
> return true;
> }
>
> +bool bpf_jit_supports_arena(void)
> +{
> + return IS_ENABLED(CONFIG_PPC64);
> +}
> +
> bool bpf_jit_supports_far_kfunc_call(void)
> {
> return IS_ENABLED(CONFIG_PPC64);
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index 16e62766c757..d4fe4dacf2d6 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -812,6 +812,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
> */
> case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
> case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
> +
> + if (insn_is_cast_user(&insn[i])) {
> + EMIT(PPC_RAW_RLDICL(tmp1_reg, src_reg, 0, 32));

Define and use PPC_RAW_RLDICL_DOT to avoid the CMPDI below.
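
A rough sketch (untested), relying on Rc being the low bit of the
instruction word:

	#define PPC_RAW_RLDICL_DOT(d, a, i, mb)	(PPC_RAW_RLDICL(d, a, i, mb) | 0x1)

rldicl. then sets cr0 from its result, so the explicit compare
against zero becomes redundant.
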
> + PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
> + EMIT(PPC_RAW_CMPDI(tmp1_reg, 0));
> + PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
> + EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
> + EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
> + break;
> + }
> +
> if (imm == 1) {
> /* special mov32 for zext */
> EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));

On Tue, Aug 05, 2025 at 09:29:07AM +0200, Christophe Leroy wrote:
>
>
> On 05/08/2025 at 08:27, Saket Kumar Bhaskar wrote:
> > LLVM generates the bpf_addr_space_cast instruction when translating
> > pointers between the native (zero) address space and
> > __attribute__((address_space(N))). The addr_space=1 is reserved as
> > the bpf_arena address space.
> >
> > rY = addr_space_cast(rX, 0, 1) is processed by the verifier and
> > converted to a normal 32-bit move: wY = wX.
> >
> > rY = addr_space_cast(rX, 1, 0) is used to convert a bpf arena pointer to
> > a pointer in the userspace vma. This has to be converted by the JIT.
> >
> > Signed-off-by: Saket Kumar Bhaskar <skb99@linux.ibm.com>
> > ---
> > arch/powerpc/net/bpf_jit.h        |  1 +
> > arch/powerpc/net/bpf_jit_comp.c   |  6 ++++++
> > arch/powerpc/net/bpf_jit_comp64.c | 11 +++++++++++
> > 3 files changed, 18 insertions(+)
> >
> > diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
> > index 2d095a873305..748e30e8b5b4 100644
> > --- a/arch/powerpc/net/bpf_jit.h
> > +++ b/arch/powerpc/net/bpf_jit.h
> > @@ -165,6 +165,7 @@ struct codegen_context {
> > unsigned int exentry_idx;
> > unsigned int alt_exit_addr;
> > u64 arena_vm_start;
> > + u64 user_vm_start;
> > };
> >
> > #define bpf_to_ppc(r) (ctx->b2p[r])
> > diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> > index 35bfdf4d8785..2b3f90930c27 100644
> > --- a/arch/powerpc/net/bpf_jit_comp.c
> > +++ b/arch/powerpc/net/bpf_jit_comp.c
> > @@ -205,6 +205,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
> > /* Make sure that the stack is quadword aligned. */
> > cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
> > cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
> > + cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
> >
> > /* Scouting faux-generate pass 0 */
> > if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
> > @@ -441,6 +442,11 @@ bool bpf_jit_supports_kfunc_call(void)
> > return true;
> > }
> >
> > +bool bpf_jit_supports_arena(void)
> > +{
> > + return IS_ENABLED(CONFIG_PPC64);
> > +}
> > +
> > bool bpf_jit_supports_far_kfunc_call(void)
> > {
> > return IS_ENABLED(CONFIG_PPC64);
> > diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> > index 16e62766c757..d4fe4dacf2d6 100644
> > --- a/arch/powerpc/net/bpf_jit_comp64.c
> > +++ b/arch/powerpc/net/bpf_jit_comp64.c
> > @@ -812,6 +812,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
> > */
> > case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
> > case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
> > +
> > + if (insn_is_cast_user(&insn[i])) {
> > + EMIT(PPC_RAW_RLDICL(tmp1_reg, src_reg, 0, 32));
>
> Define and use PPC_RAW_RLDICL_DOT to avoid the CMPDI below.
>
Alright Christophe, will define PPC_RAW_RLDICL_DOT and use it here;
the revised sequence is sketched below.
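
The cast_user sequence would then look something like this (sketch,
untested):

	/* tmp1 = lower 32 bits of src; rldicl. also sets cr0 */
	EMIT(PPC_RAW_RLDICL_DOT(tmp1_reg, src_reg, 0, 32));
	/* PPC_LI64 emits only li/lis/ori-style instructions,
	 * which leave cr0 intact
	 */
	PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
	/* NULL stays NULL: skip the OR when the lower 32 bits are zero */
	PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
	EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
	EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
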
> > + PPC_LI64(dst_reg, (ctx->user_vm_start & 0xffffffff00000000UL));
> > + EMIT(PPC_RAW_CMPDI(tmp1_reg, 0));
> > + PPC_BCC_SHORT(COND_EQ, (ctx->idx + 2) * 4);
> > + EMIT(PPC_RAW_OR(tmp1_reg, dst_reg, tmp1_reg));
> > + EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
> > + break;
> > + }
> > +
> > if (imm == 1) {
> > /* special mov32 for zext */
> > EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
>