From nobody Sun Feb 8 10:33:20 2026
From: menglong8.dong@gmail.com
X-Google-Original-From: imagedong@tencent.com
To: yhs@meta.com, alexei.starovoitov@gmail.com
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@linux.dev,
	song@kernel.org, yhs@fb.com, john.fastabend@gmail.com, kpsingh@kernel.org,
	sdf@google.com, haoluo@google.com, jolsa@kernel.org, benbjiang@tencent.com,
	bpf@vger.kernel.org, linux-kernel@vger.kernel.org, Menglong Dong
Subject: [PATCH bpf-next v7 1/3] bpf, x86: save/restore regs with BPF_DW size
Date: Thu, 22 Jun 2023 15:57:13 +0800
Message-Id: <20230622075715.1818144-2-imagedong@tencent.com>
In-Reply-To: <20230622075715.1818144-1-imagedong@tencent.com>
References: <20230622075715.1818144-1-imagedong@tencent.com>

From: Menglong Dong

As we already reserve 8 bytes on the stack for each reg, it is OK to
store/restore the regs with BPF_DW size. This makes the code in
save_regs()/restore_regs() simpler.

Signed-off-by: Menglong Dong
Acked-by: Yonghong Song
---
v6:
- adjust the commit log
---
 arch/x86/net/bpf_jit_comp.c | 35 ++++++-----------------------------
 1 file changed, 6 insertions(+), 29 deletions(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 1056bbf55b17..a407fbbffecd 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1860,57 +1860,34 @@ st:			if (is_imm8(insn->off))
 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
		      int stack_size)
 {
-	int i, j, arg_size;
-	bool next_same_struct = false;
+	int i;
 
 	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
-	for (i = 0, j = 0; i < min(nr_regs, 6); i++) {
-		/* The arg_size is at most 16 bytes, enforced by the verifier. */
-		arg_size = m->arg_size[j];
-		if (arg_size > 8) {
-			arg_size = 8;
-			next_same_struct = !next_same_struct;
-		}
-
-		emit_stx(prog, bytes_to_bpf_size(arg_size),
-			 BPF_REG_FP,
+	for (i = 0; i < min(nr_regs, 6); i++)
+		emit_stx(prog, BPF_DW, BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
-
-		j = next_same_struct ? j : j + 1;
-	}
 }
 
 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_regs,
			 int stack_size)
 {
-	int i, j, arg_size;
-	bool next_same_struct = false;
+	int i;
 
 	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
-	for (i = 0, j = 0; i < min(nr_regs, 6); i++) {
-		/* The arg_size is at most 16 bytes, enforced by the verifier. */
-		arg_size = m->arg_size[j];
-		if (arg_size > 8) {
-			arg_size = 8;
-			next_same_struct = !next_same_struct;
-		}
-
-		emit_ldx(prog, bytes_to_bpf_size(arg_size),
+	for (i = 0; i < min(nr_regs, 6); i++)
+		emit_ldx(prog, BPF_DW,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
-
-		j = next_same_struct ? j : j + 1;
-	}
 }
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
-- 
2.40.1

From nobody Sun Feb 8 10:33:20 2026
From: menglong8.dong@gmail.com
X-Google-Original-From: imagedong@tencent.com
To: yhs@meta.com, alexei.starovoitov@gmail.com
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@linux.dev,
	song@kernel.org, yhs@fb.com, john.fastabend@gmail.com, kpsingh@kernel.org,
	sdf@google.com, haoluo@google.com, jolsa@kernel.org, benbjiang@tencent.com,
	bpf@vger.kernel.org, linux-kernel@vger.kernel.org, Menglong Dong
Subject: [PATCH bpf-next v7 2/3] bpf, x86: allow function arguments up to 12 for TRACING
Date: Thu, 22 Jun 2023 15:57:14 +0800
Message-Id: <20230622075715.1818144-3-imagedong@tencent.com>
In-Reply-To: <20230622075715.1818144-1-imagedong@tencent.com>
References: <20230622075715.1818144-1-imagedong@tencent.com>

From: Menglong Dong

For now, a BPF program of type BPF_PROG_TYPE_TRACING can only be used
on kernel functions whose argument count is less than or equal to 6,
if we don't consider '> 8 bytes' struct arguments. This is quite
limiting, as many functions have more than 6 arguments. According to
the current kernel version, the statistics of function argument counts
are:

  argument count | function count
               7 | 704
               8 | 270
               9 | 84
              10 | 47
              11 | 47
              12 | 27
              13 | 22
              14 | 5
              15 | 0
              16 | 1

Therefore, let's enhance it by increasing the number of function
arguments allowed in arch_prepare_bpf_trampoline(), for now on x86_64
only.

For the case where we don't need to call the original function, which
means without BPF_TRAMP_F_CALL_ORIG, we only need to copy the function
arguments that are stored in the caller's frame into the current
frame. The 7th and later arguments are stored at "$rbp + 0x18", and
they are copied to the stack area following where the register values
are saved.

For the case with BPF_TRAMP_F_CALL_ORIG, we need to prepare the
arguments on the stack before calling the original function, which
means we need to allocate an extra "8 * (arg_count - 6)" bytes at the
top of the stack. Note that no data should be pushed to the stack
before calling the original function, so the 'rbx' value is stored at
a stack position higher than where the stack arguments are stored for
BPF_TRAMP_F_CALL_ORIG.

According to the research of Yonghong, struct members should be either
all in registers or all on the stack. Meanwhile, the compiler will
pass an argument in registers only if the remaining registers can hold
the whole argument. Therefore, we need to save the arguments in order;
otherwise, the args can end up out of order. For example:

  struct foo_struct {
          long a;
          int b;
  };
  int foo(char, char, char, char, char, struct foo_struct, char);

Here arg1-5 and arg7 will be passed in registers, and arg6 will be
passed on the stack. Therefore, we should save/restore the arguments
in the same order as the declaration of foo(), and the args used as
ctx on the stack will look like this:

  reg_arg6   -- copy from regs
  stack_arg2 -- copy from stack
  stack_arg1
  reg_arg5   -- copy from regs
  reg_arg4
  reg_arg3
  reg_arg2
  reg_arg1

We use EMIT3_off32() or EMIT4() for "lea" and "sub". The range of the
imm in "lea" and "sub" is [-128, 127] if EMIT4() is used. Therefore,
we use EMIT3_off32() instead if the imm is out of that range.

This works well for FENTRY/FEXIT/MODIFY_RETURN.
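As an illustration of the register/stack rule above, here is a rough
userspace C sketch (not part of the patch; classify(), foo_args and the
16-byte struct size, assuming the usual x86-64 padding of foo_struct,
are made up for illustration):

  #include <stdio.h>

  /* Mirror the rule above: an argument needs (size + 7) / 8 eight-byte
   * slots and is placed in registers only if all of its slots still
   * fit within the six argument registers (rdi, rsi, rdx, rcx, r8, r9).
   */
  static void classify(const int *arg_size, int nr_args)
  {
          int i, arg_regs, nr_regs = 0;

          for (i = 0; i < nr_args; i++) {
                  arg_regs = (arg_size[i] + 7) / 8;
                  if (nr_regs + arg_regs <= 6) {
                          printf("arg%d: %d register slot(s)\n", i + 1, arg_regs);
                          nr_regs += arg_regs;
                  } else {
                          printf("arg%d: %d stack slot(s)\n", i + 1, arg_regs);
                  }
          }
  }

  int main(void)
  {
          /* foo(char x5, struct foo_struct, char): the 16-byte struct
           * needs two slots, and only one register remains after
           * arg1-5, so arg6 spills to the stack while arg7 still gets
           * a register.
           */
          int foo_args[] = { 1, 1, 1, 1, 1, 16, 1 };

          classify(foo_args, 7);
          return 0;
  }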
Signed-off-by: Menglong Dong Acked-by: Yonghong Song --- v7: - somit nits from commit message and comment - remove the inline in get_nr_regs() - rename some function and various v6: - introduce get_nr_regs() to get the space that used to pass args on stack correct - rename some args and fix some spelling mistake v5: - consider the case of the struct in arguments can't be hold by regs v4: - make the stack 16-byte aligned if passing args on-stack is needed - add the function arguments statistics to the commit log v3: - use EMIT3_off32() for "lea" and "sub" only on necessary - make 12 as the maximum arguments count v2: - instead EMIT4 with EMIT3_off32 for "lea" to prevent overflow - make MAX_BPF_FUNC_ARGS as the maximum argument count --- arch/x86/net/bpf_jit_comp.c | 235 ++++++++++++++++++++++++++++++++---- 1 file changed, 209 insertions(+), 26 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a407fbbffecd..08608284041a 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1857,37 +1857,178 @@ st: if (is_imm8(insn->off)) return proglen; } =20 -static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_re= gs, - int stack_size) +static void clean_stack_garbage(const struct btf_func_model *m, + u8 **pprog, int nr_stack_slots, + int stack_size) { - int i; + int arg_size, off; + u8 *prog; + + /* Generally speaking, the compiler will pass the arguments + * on-stack with "push" instruction, which will take 8-byte + * on the stack. In this case, there won't be garbage values + * while we copy the arguments from origin stack frame to current + * in BPF_DW. + * + * However, sometimes the compiler will only allocate 4-byte on + * the stack for the arguments. For now, this case will only + * happen if there is only one argument on-stack and its size + * not more than 4 byte. In this case, there will be garbage + * values on the upper 4-byte where we store the argument on + * current stack frame. + * + * arguments on origin stack: + * + * stack_arg_1(4-byte) xxx(4-byte) + * + * what we copy: + * + * stack_arg_1(8-byte): stack_arg_1(origin) xxx + * + * and the xxx is the garbage values which we should clean here. + */ + if (nr_stack_slots !=3D 1) + return; + + /* the size of the last argument */ + arg_size =3D m->arg_size[m->nr_args - 1]; + if (arg_size <=3D 4) { + off =3D -(stack_size - 4); + prog =3D *pprog; + /* mov DWORD PTR [rbp + off], 0 */ + if (!is_imm8(off)) + EMIT2_off32(0xC7, 0x85, off); + else + EMIT3(0xC7, 0x45, off); + EMIT(0, 4); + *pprog =3D prog; + } +} + +/* get the count of the regs that are used to pass arguments */ +static int get_nr_used_regs(const struct btf_func_model *m) +{ + int i, arg_regs, nr_used_regs =3D 0; + + for (i =3D 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { + arg_regs =3D (m->arg_size[i] + 7) / 8; + if (nr_used_regs + arg_regs <=3D 6) + nr_used_regs +=3D arg_regs; + + if (nr_used_regs >=3D 6) + break; + } + + return nr_used_regs; +} + +static void save_args(const struct btf_func_model *m, u8 **prog, + int stack_size, bool for_call_origin) +{ + int arg_regs, first_off, nr_regs =3D 0, nr_stack_slots =3D 0; + int i, j; =20 /* Store function arguments to stack. * For a function that accepts two pointers the sequence will be: * mov QWORD PTR [rbp-0x10],rdi * mov QWORD PTR [rbp-0x8],rsi */ - for (i =3D 0; i < min(nr_regs, 6); i++) - emit_stx(prog, BPF_DW, BPF_REG_FP, - i =3D=3D 5 ? 
X86_REG_R9 : BPF_REG_1 + i, - -(stack_size - i * 8)); + for (i =3D 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { + arg_regs =3D (m->arg_size[i] + 7) / 8; + + /* According to the research of Yonghong, struct members + * should be all in register or all on the stack. + * Meanwhile, the compiler will pass the argument on regs + * if the remaining regs can hold the argument. + * + * Disorder of the args can happen. For example: + * + * struct foo_struct { + * long a; + * int b; + * }; + * int foo(char, char, char, char, char, struct foo_struct, + * char); + * + * the arg1-5,arg7 will be passed by regs, and arg6 will + * by stack. + */ + if (nr_regs + arg_regs > 6) { + /* copy function arguments from origin stack frame + * into current stack frame. + * + * The starting address of the arguments on-stack + * is: + * rbp + 8(push rbp) + + * 8(return addr of origin call) + + * 8(return addr of the caller) + * which means: rbp + 24 + */ + for (j =3D 0; j < arg_regs; j++) { + emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP, + nr_stack_slots * 8 + 0x18); + emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0, + -stack_size); + + if (!nr_stack_slots) + first_off =3D stack_size; + stack_size -=3D 8; + nr_stack_slots++; + } + } else { + /* Only copy the arguments on-stack to current + * 'stack_size' and ignore the regs, used to + * prepare the arguments on-stack for orign call. + */ + if (for_call_origin) { + nr_regs +=3D arg_regs; + continue; + } + + /* copy the arguments from regs into stack */ + for (j =3D 0; j < arg_regs; j++) { + emit_stx(prog, BPF_DW, BPF_REG_FP, + nr_regs =3D=3D 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, + -stack_size); + stack_size -=3D 8; + nr_regs++; + } + } + } + + clean_stack_garbage(m, prog, nr_stack_slots, first_off); } =20 -static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr= _regs, +static void restore_regs(const struct btf_func_model *m, u8 **prog, int stack_size) { - int i; + int i, j, arg_regs, nr_regs =3D 0; =20 /* Restore function arguments from stack. * For a function that accepts two pointers the sequence will be: * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] + * + * The logic here is similar to what we do in save_args() */ - for (i =3D 0; i < min(nr_regs, 6); i++) - emit_ldx(prog, BPF_DW, - i =3D=3D 5 ? X86_REG_R9 : BPF_REG_1 + i, - BPF_REG_FP, - -(stack_size - i * 8)); + for (i =3D 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { + arg_regs =3D (m->arg_size[i] + 7) / 8; + if (nr_regs + arg_regs <=3D 6) { + for (j =3D 0; j < arg_regs; j++) { + emit_ldx(prog, BPF_DW, + nr_regs =3D=3D 5 ? 
X86_REG_R9 : BPF_REG_1 + nr_regs, + BPF_REG_FP, + -stack_size); + stack_size -=3D 8; + nr_regs++; + } + } else { + stack_size -=3D 8 * arg_regs; + } + + if (nr_regs >=3D 6) + break; + } } =20 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, @@ -1915,7 +2056,10 @@ static int invoke_bpf_prog(const struct btf_func_mod= el *m, u8 **pprog, /* arg1: mov rdi, progs[i] */ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); /* arg2: lea rsi, [rbp - ctx_cookie_off] */ - EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); + if (!is_imm8(-run_ctx_off)) + EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off); + else + EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); =20 if (emit_rsb_call(&prog, bpf_trampoline_enter(p), prog)) return -EINVAL; @@ -1931,7 +2075,10 @@ static int invoke_bpf_prog(const struct btf_func_mod= el *m, u8 **pprog, emit_nops(&prog, 2); =20 /* arg1: lea rdi, [rbp - stack_size] */ - EMIT4(0x48, 0x8D, 0x7D, -stack_size); + if (!is_imm8(-stack_size)) + EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size); + else + EMIT4(0x48, 0x8D, 0x7D, -stack_size); /* arg2: progs[i]->insnsi for interpreter */ if (!p->jited) emit_mov_imm64(&prog, BPF_REG_2, @@ -1961,7 +2108,10 @@ static int invoke_bpf_prog(const struct btf_func_mod= el *m, u8 **pprog, /* arg2: mov rsi, rbx <- start time in nsec */ emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); /* arg3: lea rdx, [rbp - run_ctx_off] */ - EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); + if (!is_imm8(-run_ctx_off)) + EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off); + else + EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); if (emit_rsb_call(&prog, bpf_trampoline_exit(p), prog)) return -EINVAL; =20 @@ -2113,7 +2263,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_imag= e *im, void *image, void *i void *func_addr) { int i, ret, nr_regs =3D m->nr_args, stack_size =3D 0; - int regs_off, nregs_off, ip_off, run_ctx_off; + int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off; struct bpf_tramp_links *fentry =3D &tlinks[BPF_TRAMP_FENTRY]; struct bpf_tramp_links *fexit =3D &tlinks[BPF_TRAMP_FEXIT]; struct bpf_tramp_links *fmod_ret =3D &tlinks[BPF_TRAMP_MODIFY_RETURN]; @@ -2127,8 +2277,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_ima= ge *im, void *image, void *i if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) nr_regs +=3D (m->arg_size[i] + 7) / 8 - 1; =20 - /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ - if (nr_regs > 6) + /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6 + * are passed through regs, the remains are through stack. + */ + if (nr_regs > MAX_BPF_FUNC_ARGS) return -ENOTSUPP; =20 /* Generated trampoline stack layout: @@ -2147,7 +2299,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_ima= ge *im, void *image, void *i * * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag * + * RBP - rbx_off [ rbx value ] always + * * RBP - run_ctx_off [ bpf_tramp_run_ctx ] + * + * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG + * [ ... 
] + * [ stack_arg2 ] + * RBP - arg_stack_off [ stack_arg1 ] */ =20 /* room for return value of orig_call or fentry prog */ @@ -2167,9 +2326,26 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_ima= ge *im, void *image, void *i =20 ip_off =3D stack_size; =20 + stack_size +=3D 8; + rbx_off =3D stack_size; + stack_size +=3D (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; run_ctx_off =3D stack_size; =20 + if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) { + /* the space that used to pass arguments on-stack */ + stack_size +=3D (nr_regs - get_nr_used_regs(m)) * 8; + /* make sure the stack pointer is 16-byte aligned if we + * need pass arguments on stack, which means + * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)] + * should be 16-byte aligned. Following code depend on + * that stack_size is already 8-byte aligned. + */ + stack_size +=3D (stack_size % 16) ? 0 : 8; + } + + arg_stack_off =3D stack_size; + if (flags & BPF_TRAMP_F_SKIP_FRAME) { /* skip patched call instruction and point orig_call to actual * body of the kernel function. @@ -2189,8 +2365,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_ima= ge *im, void *image, void *i x86_call_depth_emit_accounting(&prog, NULL); EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ - EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ - EMIT1(0x53); /* push rbx */ + if (!is_imm8(stack_size)) + /* sub rsp, stack_size */ + EMIT3_off32(0x48, 0x81, 0xEC, stack_size); + else + /* sub rsp, stack_size */ + EMIT4(0x48, 0x83, 0xEC, stack_size); + /* mov QWORD PTR [rbp - rbx_off], rbx */ + emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); =20 /* Store number of argument registers of the traced function: * mov rax, nr_regs @@ -2208,7 +2390,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_imag= e *im, void *image, void *i emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); } =20 - save_regs(m, &prog, nr_regs, regs_off); + save_args(m, &prog, regs_off, false); =20 if (flags & BPF_TRAMP_F_CALL_ORIG) { /* arg1: mov rdi, im */ @@ -2238,7 +2420,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_imag= e *im, void *image, void *i } =20 if (flags & BPF_TRAMP_F_CALL_ORIG) { - restore_regs(m, &prog, nr_regs, regs_off); + restore_regs(m, &prog, regs_off); + save_args(m, &prog, arg_stack_off, true); =20 if (flags & BPF_TRAMP_F_ORIG_STACK) { emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); @@ -2279,7 +2462,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_imag= e *im, void *image, void *i } =20 if (flags & BPF_TRAMP_F_RESTORE_REGS) - restore_regs(m, &prog, nr_regs, regs_off); + restore_regs(m, &prog, regs_off); =20 /* This needs to be done regardless. 
If there were fmod_ret programs, * the return value is only updated on the stack and still needs to be @@ -2298,7 +2481,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_imag= e *im, void *image, void *i if (save_ret) emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); =20 - EMIT1(0x5B); /* pop rbx */ + emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); EMIT1(0xC9); /* leave */ if (flags & BPF_TRAMP_F_SKIP_FRAME) /* skip our return address and return to parent */ --=20 2.40.1
From nobody Sun Feb 8 10:33:20 2026
From: menglong8.dong@gmail.com
X-Google-Original-From: imagedong@tencent.com
To: yhs@meta.com, alexei.starovoitov@gmail.com
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@linux.dev,
	song@kernel.org, yhs@fb.com, john.fastabend@gmail.com, kpsingh@kernel.org,
	sdf@google.com, haoluo@google.com, jolsa@kernel.org, benbjiang@tencent.com,
	bpf@vger.kernel.org, linux-kernel@vger.kernel.org, Menglong Dong
Subject: [PATCH bpf-next v7 3/3] selftests/bpf: add testcase for TRACING with 6+ arguments
Date: Thu, 22 Jun 2023 15:57:15 +0800
Message-Id: <20230622075715.1818144-4-imagedong@tencent.com>
In-Reply-To: <20230622075715.1818144-1-imagedong@tencent.com>
References: <20230622075715.1818144-1-imagedong@tencent.com>

From: Menglong Dong

Add test9/test10 in fexit_test.c and fentry_test.c to test fentry and
fexit programs whose target functions have 7/11 arguments.
Correspondingly, add bpf_testmod_fentry_test7() and
bpf_testmod_fentry_test11() to bpf_testmod.c.

Meanwhile, add bpf_modify_return_test2() to test_run.c to test
MODIFY_RETURN with 7 arguments.

Add bpf_testmod_test_struct_arg_7()/bpf_testmod_test_struct_arg_8() in
bpf_testmod.c to test struct arguments.

And the testcases passed:

./test_progs -t fexit
Summary: 5/12 PASSED, 0 SKIPPED, 0 FAILED

./test_progs -t fentry
Summary: 3/0 PASSED, 0 SKIPPED, 0 FAILED

./test_progs -t modify_return
Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED

./test_progs -t tracing_struct
Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Menglong Dong
Acked-by: Yonghong Song
---
v6:
- add testcases to tracing_struct.c instead of fentry_test.c and
  fexit_test.c
v5:
- add testcases for MODIFY_RETURN
v4:
- use different types for the args in bpf_testmod_fentry_test{7,12}
- add testcase for garbage values in ctx
v3:
- move bpf_fentry_test{7,12} to bpf_testmod.c and rename them to
  bpf_testmod_fentry_test{7,12} meanwhile
- get return value by bpf_get_func_ret() in
  "fexit/bpf_testmod_fentry_test12", as we don't change
  ___bpf_ctx_cast() in this version
---
 net/bpf/test_run.c                            | 23 ++++++--
 .../selftests/bpf/bpf_testmod/bpf_testmod.c   | 49 ++++++++++++++++-
 .../selftests/bpf/prog_tests/fentry_fexit.c   |  4 +-
 .../selftests/bpf/prog_tests/fentry_test.c    |  2 +
 .../selftests/bpf/prog_tests/fexit_test.c     |  2 +
 .../selftests/bpf/prog_tests/modify_return.c  | 20 ++++++-
 .../selftests/bpf/prog_tests/tracing_struct.c | 19 +++++++
 .../testing/selftests/bpf/progs/fentry_test.c | 32 +++++++++++
 .../testing/selftests/bpf/progs/fexit_test.c  | 33 ++++++++++++
 .../selftests/bpf/progs/modify_return.c       | 40 ++++++++++++++
 .../selftests/bpf/progs/tracing_struct.c      | 54 +++++++++++++++++++
 11 files changed, 271 insertions(+), 7 deletions(-)

diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 2321bd2f9964..df58e8bf5e07 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -561,6 +561,13 @@ __bpf_kfunc int bpf_modify_return_test(int a, int *b) return a + *b; } =20 +__bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d, + void *e, char f, int g) +{ + *b +=3D 1; + return a + *b + c + d + (long)e + f + g; +} + int noinline bpf_fentry_shadow_test(int a) { return a + 1; @@ -596,9 +603,13 @@ __diag_pop(); =20 BTF_SET8_START(bpf_test_modify_return_ids) BTF_ID_FLAGS(func, bpf_modify_return_test) +BTF_ID_FLAGS(func, bpf_modify_return_test2) BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE) BTF_SET8_END(bpf_test_modify_return_ids) =20 
+BTF_ID_LIST(bpf_modify_return_test_id) +BTF_ID(func, bpf_modify_return_test) + static const struct btf_kfunc_id_set bpf_test_modify_return_set =3D { .owner =3D THIS_MODULE, .set =3D &bpf_test_modify_return_ids, @@ -661,9 +672,15 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, goto out; break; case BPF_MODIFY_RETURN: - ret =3D bpf_modify_return_test(1, &b); - if (b !=3D 2) - side_effect =3D 1; + if (prog->aux->attach_btf_id =3D=3D *bpf_modify_return_test_id) { + ret =3D bpf_modify_return_test(1, &b); + if (b !=3D 2) + side_effect =3D 1; + } else { + ret =3D bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7); + if (b !=3D 2) + side_effect =3D 1; + } break; default: goto out; diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/= testing/selftests/bpf/bpf_testmod/bpf_testmod.c index aaf6ef1201c7..a6f991b56345 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -34,6 +34,11 @@ struct bpf_testmod_struct_arg_3 { int b[]; }; =20 +struct bpf_testmod_struct_arg_4 { + u64 a; + int b; +}; + __diag_push(); __diag_ignore_all("-Wmissing-prototypes", "Global functions as their definitions will be in bpf_testmod.ko BTF"); @@ -75,6 +80,24 @@ bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_= arg_3 *a) { return bpf_testmod_test_struct_arg_result; } =20 +noinline int +bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e, + struct bpf_testmod_struct_arg_4 f) +{ + bpf_testmod_test_struct_arg_result =3D a + (long)b + c + d + + (long)e + f.a + f.b; + return bpf_testmod_test_struct_arg_result; +} + +noinline int +bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e, + struct bpf_testmod_struct_arg_4 f, int g) +{ + bpf_testmod_test_struct_arg_result =3D a + (long)b + c + d + + (long)e + f.a + f.b + g; + return bpf_testmod_test_struct_arg_result; +} + __bpf_kfunc void bpf_testmod_test_mod_kfunc(int i) { @@ -191,6 +214,20 @@ noinline int bpf_testmod_fentry_test3(char a, int b, u= 64 c) return a + b + c; } =20 +noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d, + void *e, char f, int g) +{ + return a + (long)b + c + d + (long)e + f + g; +} + +noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d, + void *e, char f, int g, + unsigned int h, long i, __u64 j, + unsigned long k) +{ + return a + (long)b + c + d + (long)e + f + g + h + i + j + k; +} + int bpf_testmod_fentry_ok; =20 noinline ssize_t @@ -206,6 +243,7 @@ bpf_testmod_test_read(struct file *file, struct kobject= *kobj, struct bpf_testmod_struct_arg_1 struct_arg1 =3D {10}; struct bpf_testmod_struct_arg_2 struct_arg2 =3D {2, 3}; struct bpf_testmod_struct_arg_3 *struct_arg3; + struct bpf_testmod_struct_arg_4 struct_arg4 =3D {21, 22}; int i =3D 1; =20 while (bpf_testmod_return_ptr(i)) @@ -216,6 +254,11 @@ bpf_testmod_test_read(struct file *file, struct kobjec= t *kobj, (void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2); (void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2); (void)bpf_testmod_test_struct_arg_5(); + (void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19, + (void *)20, struct_arg4); + (void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19, + (void *)20, struct_arg4, 23); + =20 struct_arg3 =3D kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + sizeof(int)), GFP_KERNEL); @@ -243,7 +286,11 @@ bpf_testmod_test_read(struct file *file, struct kobjec= t *kobj, =20 if (bpf_testmod_fentry_test1(1) !=3D 2 || bpf_testmod_fentry_test2(2, 3) !=3D 5 || 
- bpf_testmod_fentry_test3(4, 5, 6) !=3D 15) + bpf_testmod_fentry_test3(4, 5, 6) !=3D 15 || + bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20, + 21, 22) !=3D 133 || + bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20, + 21, 22, 23, 24, 25, 26) !=3D 231) goto out; =20 bpf_testmod_fentry_ok =3D 1; diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/= testing/selftests/bpf/prog_tests/fentry_fexit.c index 130f5b82d2e6..0078acee0ede 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -31,10 +31,12 @@ void test_fentry_fexit(void) ASSERT_OK(err, "ipv6 test_run"); ASSERT_OK(topts.retval, "ipv6 test retval"); =20 + ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + fentry_res =3D (__u64 *)fentry_skel->bss; fexit_res =3D (__u64 *)fexit_skel->bss; printf("%lld\n", fentry_skel->bss->test1_result); - for (i =3D 0; i < 8; i++) { + for (i =3D 0; i < 11; i++) { ASSERT_EQ(fentry_res[i], 1, "fentry result"); ASSERT_EQ(fexit_res[i], 1, "fexit result"); } diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/t= esting/selftests/bpf/prog_tests/fentry_test.c index c0d1d61d5f66..e1c0ce40febf 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -24,6 +24,8 @@ static int fentry_test(struct fentry_test_lskel *fentry_s= kel) ASSERT_OK(err, "test_run"); ASSERT_EQ(topts.retval, 0, "test_run"); =20 + ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + result =3D (__u64 *)fentry_skel->bss; for (i =3D 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) { if (!ASSERT_EQ(result[i], 1, "fentry_result")) diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/te= sting/selftests/bpf/prog_tests/fexit_test.c index 101b7343036b..ea81fa913ec6 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -24,6 +24,8 @@ static int fexit_test(struct fexit_test_lskel *fexit_skel) ASSERT_OK(err, "test_run"); ASSERT_EQ(topts.retval, 0, "test_run"); =20 + ASSERT_OK(trigger_module_test_read(1), "trigger_read"); + result =3D (__u64 *)fexit_skel->bss; for (i =3D 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) { if (!ASSERT_EQ(result[i], 1, "fexit_result")) diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools= /testing/selftests/bpf/prog_tests/modify_return.c index 5d9955af6247..93febb6d81ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/modify_return.c +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -11,7 +11,8 @@ #define UPPER(x) ((x) >> 16) =20 =20 -static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 wan= t_ret) +static void run_test(__u32 input_retval, __u16 want_side_effect, + __s16 want_ret, __s16 want_ret2) { struct modify_return *skel =3D NULL; int err, prog_fd; @@ -41,6 +42,19 @@ static void run_test(__u32 input_retval, __u16 want_side= _effect, __s16 want_ret) ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result"); ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result"); =20 + prog_fd =3D bpf_program__fd(skel->progs.fmod_ret_test2); + err =3D bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + + side_effect =3D UPPER(topts.retval); + ret =3D LOWER(topts.retval); + + ASSERT_EQ(ret, want_ret2, "test_run ret2"); + ASSERT_EQ(side_effect, want_side_effect, "modify_return side_effect2"); + 
ASSERT_EQ(skel->bss->fentry_result2, 1, "modify_return fentry_result2"); + ASSERT_EQ(skel->bss->fexit_result2, 1, "modify_return fexit_result2"); + ASSERT_EQ(skel->bss->fmod_ret_result2, 1, "modify_return fmod_ret_result2= "); + cleanup: modify_return__destroy(skel); } @@ -50,8 +64,10 @@ void serial_test_modify_return(void) { run_test(0 /* input_retval */, 1 /* want_side_effect */, - 4 /* want_ret */); + 4 /* want_ret */, + 29 /* want_ret */); run_test(-EINVAL /* input_retval */, 0 /* want_side_effect */, + -EINVAL /* want_ret */, -EINVAL /* want_ret */); } diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tool= s/testing/selftests/bpf/prog_tests/tracing_struct.c index 1c75a32186d6..fe0fb0c9849a 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -55,6 +55,25 @@ static void test_fentry(void) =20 ASSERT_EQ(skel->bss->t6, 1, "t6 ret"); =20 + ASSERT_EQ(skel->bss->t7_a, 16, "t7:a"); + ASSERT_EQ(skel->bss->t7_b, 17, "t7:b"); + ASSERT_EQ(skel->bss->t7_c, 18, "t7:c"); + ASSERT_EQ(skel->bss->t7_d, 19, "t7:d"); + ASSERT_EQ(skel->bss->t7_e, 20, "t7:e"); + ASSERT_EQ(skel->bss->t7_f_a, 21, "t7:f.a"); + ASSERT_EQ(skel->bss->t7_f_b, 22, "t7:f.b"); + ASSERT_EQ(skel->bss->t7_ret, 133, "t7 ret"); + + ASSERT_EQ(skel->bss->t8_a, 16, "t8:a"); + ASSERT_EQ(skel->bss->t8_b, 17, "t8:b"); + ASSERT_EQ(skel->bss->t8_c, 18, "t8:c"); + ASSERT_EQ(skel->bss->t8_d, 19, "t8:d"); + ASSERT_EQ(skel->bss->t8_e, 20, "t8:e"); + ASSERT_EQ(skel->bss->t8_f_a, 21, "t8:f.a"); + ASSERT_EQ(skel->bss->t8_f_b, 22, "t8:f.b"); + ASSERT_EQ(skel->bss->t8_g, 23, "t8:g"); + ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret"); + tracing_struct__detach(skel); destroy_skel: tracing_struct__destroy(skel); diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testin= g/selftests/bpf/progs/fentry_test.c index 52a550d281d9..18a0ea63cf67 100644 --- a/tools/testing/selftests/bpf/progs/fentry_test.c +++ b/tools/testing/selftests/bpf/progs/fentry_test.c @@ -77,3 +77,35 @@ int BPF_PROG(test8, struct bpf_fentry_test_t *arg) test8_result =3D 1; return 0; } + +__u64 test9_result =3D 0; +SEC("fentry/bpf_testmod_fentry_test7") +int BPF_PROG(test9, __u64 a, void *b, short c, int d, void *e, char f, + int g) +{ + test9_result =3D a =3D=3D 16 && b =3D=3D (void *)17 && c =3D=3D 18 && d = =3D=3D 19 && + e =3D=3D (void *)20 && f =3D=3D 21 && g =3D=3D 22; + return 0; +} + +__u64 test10_result =3D 0; +SEC("fentry/bpf_testmod_fentry_test11") +int BPF_PROG(test10, __u64 a, void *b, short c, int d, void *e, char f, + int g, unsigned int h, long i, __u64 j, unsigned long k) +{ + test10_result =3D a =3D=3D 16 && b =3D=3D (void *)17 && c =3D=3D 18 && d = =3D=3D 19 && + e =3D=3D (void *)20 && f =3D=3D 21 && g =3D=3D 22 && h =3D=3D 23 && + i =3D=3D 24 && j =3D=3D 25 && k =3D=3D 26; + return 0; +} + +__u64 test11_result =3D 0; +SEC("fentry/bpf_testmod_fentry_test11") +int BPF_PROG(test11, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f, + __u64 g, __u64 h, __u64 i, __u64 j, __u64 k) +{ + test11_result =3D a =3D=3D 16 && b =3D=3D 17 && c =3D=3D 18 && d =3D=3D 1= 9 && + e =3D=3D 20 && f =3D=3D 21 && g =3D=3D 22 && h =3D=3D 23 && + i =3D=3D 24 && j =3D=3D 25 && k =3D=3D 26; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing= /selftests/bpf/progs/fexit_test.c index 8f1ccb7302e1..b2126fb781a1 100644 --- a/tools/testing/selftests/bpf/progs/fexit_test.c +++ b/tools/testing/selftests/bpf/progs/fexit_test.c @@ -78,3 +78,36 @@ int 
BPF_PROG(test8, struct bpf_fentry_test_t *arg) test8_result =3D 1; return 0; } + +__u64 test9_result =3D 0; +SEC("fexit/bpf_testmod_fentry_test7") +int BPF_PROG(test9, __u64 a, void *b, short c, int d, void *e, char f, + int g, int ret) +{ + test9_result =3D a =3D=3D 16 && b =3D=3D (void *)17 && c =3D=3D 18 && d = =3D=3D 19 && + e =3D=3D (void *)20 && f =3D=3D 21 && g =3D=3D 22 && ret =3D=3D 133; + return 0; +} + +__u64 test10_result =3D 0; +SEC("fexit/bpf_testmod_fentry_test11") +int BPF_PROG(test10, __u64 a, void *b, short c, int d, void *e, char f, + int g, unsigned int h, long i, __u64 j, unsigned long k, + int ret) +{ + test10_result =3D a =3D=3D 16 && b =3D=3D (void *)17 && c =3D=3D 18 && d = =3D=3D 19 && + e =3D=3D (void *)20 && f =3D=3D 21 && g =3D=3D 22 && h =3D=3D 23 && + i =3D=3D 24 && j =3D=3D 25 && k =3D=3D 26 && ret =3D=3D 231; + return 0; +} + +__u64 test11_result =3D 0; +SEC("fexit/bpf_testmod_fentry_test11") +int BPF_PROG(test11, __u64 a, __u64 b, __u64 c, __u64 d, __u64 e, __u64 f, + __u64 g, __u64 h, __u64 i, __u64 j, __u64 k, __u64 ret) +{ + test11_result =3D a =3D=3D 16 && b =3D=3D 17 && c =3D=3D 18 && d =3D=3D 1= 9 && + e =3D=3D 20 && f =3D=3D 21 && g =3D=3D 22 && h =3D=3D 23 && + i =3D=3D 24 && j =3D=3D 25 && k =3D=3D 26 && ret =3D=3D 231; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/modify_return.c b/tools/test= ing/selftests/bpf/progs/modify_return.c index 8b7466a15c6b..3376d4849f58 100644 --- a/tools/testing/selftests/bpf/progs/modify_return.c +++ b/tools/testing/selftests/bpf/progs/modify_return.c @@ -47,3 +47,43 @@ int BPF_PROG(fexit_test, int a, __u64 b, int ret) =20 return 0; } + +static int sequence2; + +__u64 fentry_result2 =3D 0; +SEC("fentry/bpf_modify_return_test2") +int BPF_PROG(fentry_test2, int a, int *b, short c, int d, void *e, char f, + int g) +{ + sequence2++; + fentry_result2 =3D (sequence2 =3D=3D 1); + return 0; +} + +__u64 fmod_ret_result2 =3D 0; +SEC("fmod_ret/bpf_modify_return_test2") +int BPF_PROG(fmod_ret_test2, int a, int *b, short c, int d, void *e, char = f, + int g, int ret) +{ + sequence2++; + /* This is the first fmod_ret program, the ret passed should be 0 */ + fmod_ret_result2 =3D (sequence2 =3D=3D 2 && ret =3D=3D 0); + return input_retval; +} + +__u64 fexit_result2 =3D 0; +SEC("fexit/bpf_modify_return_test2") +int BPF_PROG(fexit_test2, int a, int *b, short c, int d, void *e, char f, + int g, int ret) +{ + sequence2++; + /* If the input_reval is non-zero a successful modification should have + * occurred. 
+ */ + if (input_retval) + fexit_result2 =3D (sequence2 =3D=3D 3 && ret =3D=3D input_retval); + else + fexit_result2 =3D (sequence2 =3D=3D 3 && ret =3D=3D 29); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/tes= ting/selftests/bpf/progs/tracing_struct.c index c435a3a8328a..515daef3c84b 100644 --- a/tools/testing/selftests/bpf/progs/tracing_struct.c +++ b/tools/testing/selftests/bpf/progs/tracing_struct.c @@ -18,6 +18,11 @@ struct bpf_testmod_struct_arg_3 { int b[]; }; =20 +struct bpf_testmod_struct_arg_4 { + u64 a; + int b; +}; + long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs; __u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3; long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret; @@ -25,6 +30,9 @@ long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret; long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret; long t5_ret; int t6; +long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret; +long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret; + =20 SEC("fentry/bpf_testmod_test_struct_arg_1") int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, = b, int, c) @@ -130,4 +138,50 @@ int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_s= truct_arg_3 *, a) return 0; } =20 +SEC("fentry/bpf_testmod_test_struct_arg_7") +int BPF_PROG2(test_struct_arg_12, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f) +{ + t7_a =3D a; + t7_b =3D (long)b; + t7_c =3D c; + t7_d =3D d; + t7_e =3D (long)e; + t7_f_a =3D f.a; + t7_f_b =3D f.b; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_7") +int BPF_PROG2(test_struct_arg_13, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, ret) +{ + t7_ret =3D ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_8") +int BPF_PROG2(test_struct_arg_14, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, g) +{ + t8_a =3D a; + t8_b =3D (long)b; + t8_c =3D c; + t8_d =3D d; + t8_e =3D (long)e; + t8_f_a =3D f.a; + t8_f_b =3D f.b; + t8_g =3D g; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_8") +int BPF_PROG2(test_struct_arg_15, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, g, + int, ret) +{ + t8_ret =3D ret; + return 0; +} + char _license[] SEC("license") =3D "GPL"; --=20 2.40.1
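With the series applied, tracing one of the new 7-argument test
functions looks no different from the existing tests. Below is a
minimal sketch of such an fentry program, modeled on the test9 program
added above; it assumes the selftests build environment (vmlinux.h,
bpf_helpers.h, bpf_tracing.h) and that the bpf_testmod module from this
series is loaded, and the program/variable names are illustrative only:

  // SPDX-License-Identifier: GPL-2.0
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  __u64 seen_sum = 0;

  SEC("fentry/bpf_testmod_fentry_test7")
  int BPF_PROG(trace_test7, __u64 a, void *b, short c, int d, void *e,
               char f, int g)
  {
          /* All seven arguments, including the ones passed on the
           * stack, are available through the trampoline-built ctx.
           */
          seen_sum = a + (long)b + c + d + (long)e + f + g;
          return 0;
  }

  char _license[] SEC("license") = "GPL";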