[PATCH bpf-next v3 1/2] arm64, bpf: add internal-only MOV instruction to resolve per-CPU addrs

Puranjay Mohan posted 2 patches 1 year, 7 months ago
There is a newer version of this series
[PATCH bpf-next v3 1/2] arm64, bpf: add internal-only MOV instruction to resolve per-CPU addrs
Posted by Puranjay Mohan 1 year, 7 months ago
From: Puranjay Mohan <puranjay12@gmail.com>

Support an instruction for resolving absolute addresses of per-CPU
data from their per-CPU offsets. This instruction is internal-only;
users are not allowed to use it directly. For now, it will only be
used for internal inlining optimizations between the BPF verifier and
the BPF JITs.
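
For context, the generic bpf-next side marks this internal-only instruction
as an ALU64 register-to-register MOV with a special off value; the arm64 JIT
only relies on insn_is_mov_percpu_addr() from the diff below. A rough sketch
of the instruction's shape (field values are illustrative and come from the
generic patch, not from this one):

	/* dst_reg = absolute per-CPU address computed from src_reg */
	struct bpf_insn mov_percpu = {
		.code    = BPF_ALU64 | BPF_MOV | BPF_X,
		.dst_reg = BPF_REG_0,
		.src_reg = BPF_REG_0,
		.off     = BPF_ADDR_PERCPU,	/* internal marker, never user-visible */
		.imm     = 0,
	};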

Since commit 7158627686f0 ("arm64: percpu: implement optimised pcpu
access using tpidr_el1"), the per-cpu offset for the CPU is stored in
the tpidr_el1/2 register of that CPU.

To support this BPF instruction in the ARM64 JIT, the following ARM64
instructions are emitted:

mov dst, src		// Move src to dst, if src != dst
mrs tmp, tpidr_el1/2	// Move the per-CPU offset of the current CPU into tmp.
add dst, dst, tmp	// Add the per-CPU offset to dst.
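
In C terms, the emitted sequence computes the following (a minimal sketch
with an illustrative function name; kernels running at EL2, i.e. with VHE,
read tpidr_el2 instead):

	static inline u64 percpu_ptr_resolve(u64 src)
	{
		u64 off;

		/* tpidr_el1 holds this CPU's per-CPU offset */
		asm volatile("mrs %0, tpidr_el1" : "=r" (off));
		return src + off;	/* absolute address of this CPU's copy */
	}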

To measure the performance improvement provided by this change, the
benchmark in [1] was used:

Before:
glob-arr-inc   :   23.597 ± 0.012M/s
arr-inc        :   23.173 ± 0.019M/s
hash-inc       :   12.186 ± 0.028M/s

After:
glob-arr-inc   :   23.819 ± 0.034M/s
arr-inc        :   23.285 ± 0.017M/s
hash-inc       :   12.419 ± 0.011M/s

[1] https://github.com/anakryiko/linux/commit/8dec900975ef

Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
---
 arch/arm64/include/asm/insn.h |  7 +++++++
 arch/arm64/lib/insn.c         | 11 +++++++++++
 arch/arm64/net/bpf_jit.h      |  6 ++++++
 arch/arm64/net/bpf_jit_comp.c | 14 ++++++++++++++
 4 files changed, 38 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index db1aeacd4cd9..8de0e39b29f3 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -135,6 +135,11 @@ enum aarch64_insn_special_register {
 	AARCH64_INSN_SPCLREG_SP_EL2	= 0xF210
 };
 
+enum aarch64_insn_system_register {
+	AARCH64_INSN_SYSREG_TPIDR_EL1	= 0x4684,
+	AARCH64_INSN_SYSREG_TPIDR_EL2	= 0x6682,
+};
+
 enum aarch64_insn_variant {
 	AARCH64_INSN_VARIANT_32BIT,
 	AARCH64_INSN_VARIANT_64BIT
@@ -686,6 +691,8 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
 }
 #endif
 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+			 enum aarch64_insn_system_register sysreg);
 
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c
index a635ab83fee3..b008a9b46a7f 100644
--- a/arch/arm64/lib/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -1515,3 +1515,14 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
 
 	return insn;
 }
+
+u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+			 enum aarch64_insn_system_register sysreg)
+{
+	u32 insn = aarch64_insn_get_mrs_value();
+
+	insn &= ~GENMASK(19, 0);
+	insn |= sysreg << 5;
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT,
+					    insn, result);
+}
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 23b1b34db088..b627ef7188c7 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -297,4 +297,10 @@
 #define A64_ADR(Rd, offset) \
 	aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
 
+/* MRS */
+#define A64_MRS_TPIDR_EL1(Rt) \
+	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL1)
+#define A64_MRS_TPIDR_EL2(Rt) \
+	aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL2)
+
 #endif /* _BPF_JIT_H */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 76b91f36c729..ed8f9716d9d5 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -877,6 +877,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			emit(A64_ORR(1, tmp, dst, tmp), ctx);
 			emit(A64_MOV(1, dst, tmp), ctx);
 			break;
+		} else if (insn_is_mov_percpu_addr(insn)) {
+			if (dst != src)
+				emit(A64_MOV(1, dst, src), ctx);
+			if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+				emit(A64_MRS_TPIDR_EL2(tmp), ctx);
+			else
+				emit(A64_MRS_TPIDR_EL1(tmp), ctx);
+			emit(A64_ADD(1, dst, dst, tmp), ctx);
+			break;
 		}
 		switch (insn->off) {
 		case 0:
@@ -2527,6 +2536,11 @@ bool bpf_jit_supports_arena(void)
 	return true;
 }
 
+bool bpf_jit_supports_percpu_insn(void)
+{
+	return true;
+}
+
 void bpf_jit_free(struct bpf_prog *prog)
 {
 	if (prog->jited) {
-- 
2.40.1

Re: [PATCH bpf-next v3 1/2] arm64, bpf: add internal-only MOV instruction to resolve per-CPU addrs
Posted by Andrii Nakryiko 1 year, 7 months ago
On Fri, Apr 26, 2024 at 5:14 AM Puranjay Mohan <puranjay@kernel.org> wrote:
>
> From: Puranjay Mohan <puranjay12@gmail.com>
>
> Support an instruction for resolving absolute addresses of per-CPU
> data from their per-CPU offsets. This instruction is internal-only and
> users are not allowed to use them directly. They will only be used for
> internal inlining optimizations for now between BPF verifier and BPF
> JITs.
>
> Since commit 7158627686f0 ("arm64: percpu: implement optimised pcpu
> access using tpidr_el1"), the per-cpu offset for the CPU is stored in
> the tpidr_el1/2 register of that CPU.
>
> To support this BPF instruction in the ARM64 JIT, the following ARM64
> instructions are emitted:
>
> mov dst, src            // Move src to dst, if src != dst
> mrs tmp, tpidr_el1/2    // Move per-cpu offset of the current cpu in tmp.
> add dst, dst, tmp       // Add the per cpu offset to the dst.
>
> To measure the performance improvement provided by this change, the
> benchmark in [1] was used:
>
> Before:
> glob-arr-inc   :   23.597 ± 0.012M/s
> arr-inc        :   23.173 ± 0.019M/s
> hash-inc       :   12.186 ± 0.028M/s
>
> After:
> glob-arr-inc   :   23.819 ± 0.034M/s
> arr-inc        :   23.285 ± 0.017M/s

I still expected a better improvement (global-arr-inc's results
improved more than arr-inc, which is completely different from
x86-64), but it's still a good thing to support this for arm64, of
course.

ack for generic parts I can understand:

Acked-by: Andrii Nakryiko <andrii@kernel.org>

> hash-inc       :   12.419 ± 0.011M/s
>
> [1] https://github.com/anakryiko/linux/commit/8dec900975ef
>
> Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
> ---
>  arch/arm64/include/asm/insn.h |  7 +++++++
>  arch/arm64/lib/insn.c         | 11 +++++++++++
>  arch/arm64/net/bpf_jit.h      |  6 ++++++
>  arch/arm64/net/bpf_jit_comp.c | 14 ++++++++++++++
>  4 files changed, 38 insertions(+)
>

[...]
Re: [PATCH bpf-next v3 1/2] arm64, bpf: add internal-only MOV instruction to resolve per-CPU addrs
Posted by Puranjay Mohan 1 year, 7 months ago
Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:

> On Fri, Apr 26, 2024 at 5:14 AM Puranjay Mohan <puranjay@kernel.org> wrote:
>>
>> From: Puranjay Mohan <puranjay12@gmail.com>
>>
>> Support an instruction for resolving absolute addresses of per-CPU
>> data from their per-CPU offsets. This instruction is internal-only and
>> users are not allowed to use them directly. They will only be used for
>> internal inlining optimizations for now between BPF verifier and BPF
>> JITs.
>>
>> Since commit 7158627686f0 ("arm64: percpu: implement optimised pcpu
>> access using tpidr_el1"), the per-cpu offset for the CPU is stored in
>> the tpidr_el1/2 register of that CPU.
>>
>> To support this BPF instruction in the ARM64 JIT, the following ARM64
>> instructions are emitted:
>>
>> mov dst, src            // Move src to dst, if src != dst
>> mrs tmp, tpidr_el1/2    // Move per-cpu offset of the current cpu in tmp.
>> add dst, dst, tmp       // Add the per cpu offset to the dst.
>>
>> To measure the performance improvement provided by this change, the
>> benchmark in [1] was used:
>>
>> Before:
>> glob-arr-inc   :   23.597 ± 0.012M/s
>> arr-inc        :   23.173 ± 0.019M/s
>> hash-inc       :   12.186 ± 0.028M/s
>>
>> After:
>> glob-arr-inc   :   23.819 ± 0.034M/s
>> arr-inc        :   23.285 ± 0.017M/s
>
> I still expected a better improvement (global-arr-inc's results
> improved more than arr-inc, which is completely different from
> x86-64), but it's still a good thing to support this for arm64, of
> course.
>
> ack for generic parts I can understand:
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
>

I will have to do more research to find out why we don't see a bigger
improvement.

But this is what is happening here:

This was the complete picture before inlining:

int cpu = bpf_get_smp_processor_id();
mov     x10, #0xffffffffffffd4a8
movk    x10, #0x802c, lsl #16
movk    x10, #0x8000, lsl #32
blr     x10 ---------------------------------------> nop
                                                     nop
                                                     adrp    x0, 0xffff800082128000
                                                     mrs     x1, tpidr_el1
                                                     add     x0, x0, #0x8
                                                     ldrsw   x0, [x0, x1]
            <----------------------------------------ret
add     x7, x0, #0x0


Now we have:

int cpu = bpf_get_smp_processor_id();
mov     x7, #0xffff8000ffffffff
movk    x7, #0x8212, lsl #16
movk    x7, #0x8008
mrs     x10, tpidr_el1
add     x7, x7, x10
ldr     w7, [x7]


So, we have removed multiple instructions, including a branch and a
return. I was expecting to see more improvement. This benchmark was
taken in a KVM-based virtual machine; maybe if I run it on bare metal
I will see more improvement?
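
For reference, the "after" sequence above is produced by the verifier-level
rewrite of the bpf_get_smp_processor_id() call; roughly (a sketch using macro
names from the generic bpf-next work and an illustrative per-CPU variable;
note BPF_LD_IMM64 occupies two instruction slots):

insn_buf[0] = BPF_LD_IMM64(BPF_REG_0, (u64)(unsigned long)&cpu_number); /* illustrative */
insn_buf[2] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);               /* new internal MOV */
insn_buf[3] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);              /* load the cpu id */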

Thanks,
Puranjay
Re: [PATCH bpf-next v3 1/2] arm64, bpf: add internal-only MOV instruction to resolve per-CPU addrs
Posted by Andrii Nakryiko 1 year, 7 months ago
On Fri, Apr 26, 2024 at 9:55 AM Puranjay Mohan <puranjay@kernel.org> wrote:
>
> Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:
>
> > On Fri, Apr 26, 2024 at 5:14 AM Puranjay Mohan <puranjay@kernel.org> wrote:
> >>
> >> From: Puranjay Mohan <puranjay12@gmail.com>
> >>
> >> Support an instruction for resolving absolute addresses of per-CPU
> >> data from their per-CPU offsets. This instruction is internal-only and
> >> users are not allowed to use them directly. They will only be used for
> >> internal inlining optimizations for now between BPF verifier and BPF
> >> JITs.
> >>
> >> Since commit 7158627686f0 ("arm64: percpu: implement optimised pcpu
> >> access using tpidr_el1"), the per-cpu offset for the CPU is stored in
> >> the tpidr_el1/2 register of that CPU.
> >>
> >> To support this BPF instruction in the ARM64 JIT, the following ARM64
> >> instructions are emitted:
> >>
> >> mov dst, src            // Move src to dst, if src != dst
> >> mrs tmp, tpidr_el1/2    // Move per-cpu offset of the current cpu in tmp.
> >> add dst, dst, tmp       // Add the per cpu offset to the dst.
> >>
> >> To measure the performance improvement provided by this change, the
> >> benchmark in [1] was used:
> >>
> >> Before:
> >> glob-arr-inc   :   23.597 ± 0.012M/s
> >> arr-inc        :   23.173 ± 0.019M/s
> >> hash-inc       :   12.186 ± 0.028M/s
> >>
> >> After:
> >> glob-arr-inc   :   23.819 ± 0.034M/s
> >> arr-inc        :   23.285 ± 0.017M/s
> >
> > I still expected a better improvement (global-arr-inc's results
> > improved more than arr-inc, which is completely different from
> > x86-64), but it's still a good thing to support this for arm64, of
> > course.
> >
> > ack for generic parts I can understand:
> >
> > Acked-by: Andrii Nakryiko <andrii@kernel.org>
> >
>
> I will have to do more research to find why we don't see very high
> improvement.
>
> But this is what is happening here:
>
> This was the complete picture before inlining:
>
> int cpu = bpf_get_smp_processor_id();
> mov     x10, #0xffffffffffffd4a8
> movk    x10, #0x802c, lsl #16
> movk    x10, #0x8000, lsl #32
> blr     x10 ---------------------------------------> nop
>                                                      nop
>                                                      adrp    x0, 0xffff800082128000
>                                                      mrs     x1, tpidr_el1
>                                                      add     x0, x0, #0x8
>                                                      ldrsw   x0, [x0, x1]
>             <----------------------------------------ret
> add     x7, x0, #0x0
>
>
> Now we have:
>
> int cpu = bpf_get_smp_processor_id();
> mov     x7, #0xffff8000ffffffff
> movk    x7, #0x8212, lsl #16
> movk    x7, #0x8008
> mrs     x10, tpidr_el1
> add     x7, x7, x10
> ldr     w7, [x7]
>
>
> So, we have removed multiple instructions including a branch and a
> return. I was expecting to see more improvement. This benchmark is taken
> from a KVM based virtual machine, maybe if I do it on bare-metal I would
> see more improvement ?

I see, yeah, I think it might change significantly. I remember, back
when I was benchmarking BPF ringbuf, I was getting very different
results inside QEMU vs. bare metal, and I don't mean just in absolute
numbers. QEMU/KVM seems to change a lot of things when it comes to
contention, atomic instructions, etc. Anyway, for benchmarking, always
try to use bare metal.

>
> Thanks,
> Puranjay
Re: [PATCH bpf-next v3 1/2] arm64, bpf: add internal-only MOV instruction to resolve per-CPU addrs
Posted by Puranjay Mohan 1 year, 7 months ago
Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:

> On Fri, Apr 26, 2024 at 9:55 AM Puranjay Mohan <puranjay@kernel.org> wrote:
>>
>> Andrii Nakryiko <andrii.nakryiko@gmail.com> writes:
>>
>> > On Fri, Apr 26, 2024 at 5:14 AM Puranjay Mohan <puranjay@kernel.org> wrote:
>> >>
>> >> From: Puranjay Mohan <puranjay12@gmail.com>
>> >>
>> >> Support an instruction for resolving absolute addresses of per-CPU
>> >> data from their per-CPU offsets. This instruction is internal-only and
>> >> users are not allowed to use them directly. They will only be used for
>> >> internal inlining optimizations for now between BPF verifier and BPF
>> >> JITs.
>> >>
>> >> Since commit 7158627686f0 ("arm64: percpu: implement optimised pcpu
>> >> access using tpidr_el1"), the per-cpu offset for the CPU is stored in
>> >> the tpidr_el1/2 register of that CPU.
>> >>
>> >> To support this BPF instruction in the ARM64 JIT, the following ARM64
>> >> instructions are emitted:
>> >>
>> >> mov dst, src            // Move src to dst, if src != dst
>> >> mrs tmp, tpidr_el1/2    // Move per-cpu offset of the current cpu in tmp.
>> >> add dst, dst, tmp       // Add the per cpu offset to the dst.
>> >>
>> >> To measure the performance improvement provided by this change, the
>> >> benchmark in [1] was used:
>> >>
>> >> Before:
>> >> glob-arr-inc   :   23.597 ± 0.012M/s
>> >> arr-inc        :   23.173 ± 0.019M/s
>> >> hash-inc       :   12.186 ± 0.028M/s
>> >>
>> >> After:
>> >> glob-arr-inc   :   23.819 ± 0.034M/s
>> >> arr-inc        :   23.285 ± 0.017M/s
>> >
>> > I still expected a better improvement (global-arr-inc's results
>> > improved more than arr-inc, which is completely different from
>> > x86-64), but it's still a good thing to support this for arm64, of
>> > course.
>> >
>> > ack for generic parts I can understand:
>> >
>> > Acked-by: Andrii Nakryiko <andrii@kernel.org>
>> >
>>
>> I will have to do more research to find why we don't see very high
>> improvement.
>>
>> But this is what is happening here:
>>
>> This was the complete picture before inlining:
>>
>> int cpu = bpf_get_smp_processor_id();
>> mov     x10, #0xffffffffffffd4a8
>> movk    x10, #0x802c, lsl #16
>> movk    x10, #0x8000, lsl #32
>> blr     x10 ---------------------------------------> nop
>>                                                      nop
>>                                                      adrp    x0, 0xffff800082128000
>>                                                      mrs     x1, tpidr_el1
>>                                                      add     x0, x0, #0x8
>>                                                      ldrsw   x0, [x0, x1]
>>             <----------------------------------------ret
>> add     x7, x0, #0x0
>>
>>
>> Now we have:
>>
>> int cpu = bpf_get_smp_processor_id();
>> mov     x7, #0xffff8000ffffffff
>> movk    x7, #0x8212, lsl #16
>> movk    x7, #0x8008
>> mrs     x10, tpidr_el1
>> add     x7, x7, x10
>> ldr     w7, [x7]
>>
>>
>> So, we have removed multiple instructions including a branch and a
>> return. I was expecting to see more improvement. This benchmark is taken
>> from a KVM based virtual machine, maybe if I do it on bare-metal I would
>> see more improvement ?
>
> I see, yeah, I think it might change significantly. I remember back
> from times when I was benchmarking BPF ringbuf, I was getting
> very-very different results from inside QEMU vs bare metal. And I
> don't mean just in absolute numbers. QEMU/KVM seems to change a lot of
> things when it comes to contentions, atomic instructions, etc, etc.
> Anyways, for benchmarking, always try to do bare metal.
>

I found the solution to this. I am seeing much better performance when
implementing this inlining in the JIT through another method, similar
to what I did for riscv; see [1].

[1] https://lore.kernel.org/all/20240430175834.33152-3-puranjay@kernel.org/

I will do the same for ARM64 in V5 of this series.
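
Rough shape of that JIT-side approach, as a sketch (hook and macro names are
illustrative; the real implementation is in the linked riscv patch and will
be in the arm64 v5):

/* Tell the verifier to leave the call alone; the JIT will inline it. */
bool bpf_jit_inlines_helper_call(s32 imm)
{
	return imm == BPF_FUNC_get_smp_processor_id;
}

/* What the JIT could emit for the call: on arm64, current lives in sp_el0
 * and struct thread_info is embedded at the start of task_struct, so the
 * CPU number is a single load away.
 */
static void emit_inlined_smp_processor_id(struct jit_ctx *ctx, u8 r0)
{
	emit(A64_MRS_SP_EL0(r0), ctx);					   /* r0 = current */
	emit(A64_LDR32I(r0, r0, offsetof(struct thread_info, cpu)), ctx); /* r0 = cpu id */
}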

Thanks,
Puranjay