When a conditional jump compares a scalar register with itself
(e.g., r0 <= r0, r0 > r0, r0 < r0), the BPF verifier incorrectly
attempts to adjust the register's min/max bounds. This produces
invalid range bounds and triggers a BUG warning.
The problematic BPF program:
0: call bpf_get_prandom_u32
1: w8 = 0x80000000
2: r0 &= r8
3: if r0 > r0 goto <exit>
Instruction 3 triggers a kernel warning:
3: if r0 > r0 goto <exit>
true_reg1: range bounds violation u64=[0x1, 0x0] s64=[0x1, 0x0] u32=[0x1, 0x0] s32=[0x1, 0x0] var_off=(0x0, 0x0)
true_reg2: const tnum out of sync with range bounds u64=[0x0, 0xffffffffffffffff] s64=[0x8000000000000000, 0x7fffffffffffffff] var_off=(0x0, 0x0)
Comparing a register with itself should never change its bounds, and
for most comparison operations the result is known in advance
(e.g., r0 == r0 is always true, r0 < r0 is always false).
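For reference, the per-opcode outcome of "r OP r" (as encoded by the
diff below) is:

  always taken:    JEQ, JGE, JSGE, JLE, JSLE
  never taken:     JNE, JGT, JSGT, JLT, JSLT
  value-dependent: JSET (taken iff r != 0, unknown when r may be 0)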
Fix this by:
1. Enhancing is_scalar_branch_taken() to compute the branch direction
for same-register comparisons across all BPF jump operations
2. Adding an early return in reg_set_min_max() to skip bounds adjustment
when the branch direction is unknown (e.g., BPF_JSET) on the same
register (see the sketch after this summary)
The fix skips these unnecessary bounds adjustments, preventing the
verifier BUG while keeping the branch direction analysis correct.
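To illustrate item 2: BPF_JSET is the one self-comparison whose
direction can remain unknown, since r & r is nonzero exactly when r is
nonzero. A minimal standalone sketch of that decision (hypothetical
helper, not part of this patch; mask == 0 is the tnum_is_const()
condition, and return values follow is_scalar_branch_taken():
1 = always taken, 0 = never taken, -1 = unknown):

  #include <stdint.h>

  /* Hypothetical helper mirroring the BPF_JSET same-register logic
   * in the diff below.
   */
  static int jset_same_reg_direction(uint64_t value, uint64_t mask,
                                     int64_t smin, int64_t smax)
  {
          if (mask == 0)                  /* r is a known constant */
                  return value != 0;      /* taken iff r != 0 */
          if (smin <= 0 && smax >= 0)     /* 0 is a possible value of r */
                  return -1;              /* direction unknown */
          return 1;                       /* r can never be 0: taken */
  }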
Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
Reported-by: Yinhao Hu <dddddd@hust.edu.cn>
Closes: https://lore.kernel.org/all/1881f0f5.300df.199f2576a01.Coremail.kaiyanm@hust.edu.cn/
Signed-off-by: KaFai Wan <kafai.wan@linux.dev>
---
kernel/bpf/verifier.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 542e23fb19c7..a571263f4ebe 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -15995,6 +15995,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
switch (opcode) {
case BPF_JEQ:
+ if (reg1 == reg2)
+ return 1;
/* constants, umin/umax and smin/smax checks would be
* redundant in this case because they all should match
*/
@@ -16021,6 +16023,8 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
}
break;
case BPF_JNE:
+ if (reg1 == reg2)
+ return 0;
/* constants, umin/umax and smin/smax checks would be
* redundant in this case because they all should match
*/
@@ -16047,6 +16051,12 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
}
break;
case BPF_JSET:
+ if (reg1 == reg2) {
+ if (tnum_is_const(t1))
+ return t1.value != 0;
+ else
+ return (smin1 <= 0 && smax1 >= 0) ? -1 : 1;
+ }
if (!is_reg_const(reg2, is_jmp32)) {
swap(reg1, reg2);
swap(t1, t2);
@@ -16059,48 +16069,64 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta
return 0;
break;
case BPF_JGT:
+ if (reg1 == reg2)
+ return 0;
if (umin1 > umax2)
return 1;
else if (umax1 <= umin2)
return 0;
break;
case BPF_JSGT:
+ if (reg1 == reg2)
+ return 0;
if (smin1 > smax2)
return 1;
else if (smax1 <= smin2)
return 0;
break;
case BPF_JLT:
+ if (reg1 == reg2)
+ return 0;
if (umax1 < umin2)
return 1;
else if (umin1 >= umax2)
return 0;
break;
case BPF_JSLT:
+ if (reg1 == reg2)
+ return 0;
if (smax1 < smin2)
return 1;
else if (smin1 >= smax2)
return 0;
break;
case BPF_JGE:
+ if (reg1 == reg2)
+ return 1;
if (umin1 >= umax2)
return 1;
else if (umax1 < umin2)
return 0;
break;
case BPF_JSGE:
+ if (reg1 == reg2)
+ return 1;
if (smin1 >= smax2)
return 1;
else if (smax1 < smin2)
return 0;
break;
case BPF_JLE:
+ if (reg1 == reg2)
+ return 1;
if (umax1 <= umin2)
return 1;
else if (umin1 > umax2)
return 0;
break;
case BPF_JSLE:
+ if (reg1 == reg2)
+ return 1;
if (smax1 <= smin2)
return 1;
else if (smin1 > smax2)
@@ -16439,6 +16465,13 @@ static int reg_set_min_max(struct bpf_verifier_env *env,
if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
return 0;
+ /* We compute branch direction for same SCALAR_VALUE registers in
+ * is_scalar_branch_taken(). For unknown branch directions (e.g., BPF_JSET)
+ * on the same registers, we don't need to adjust the min/max values.
+ */
+ if (false_reg1 == false_reg2)
+ return 0;
+
/* fallthrough (FALSE) branch */
regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
reg_bounds_sync(false_reg1);
--
2.43.0
On Fri, Oct 31, 2025 at 8:44 AM KaFai Wan <kafai.wan@linux.dev> wrote:
>
> [...]
>
> case BPF_JSGT:
> + if (reg1 == reg2)
> + return 0;
This is uglier than the previous version.
reg1 == reg2 is a syzbot territory.
We shouldn't uglify the code everywhere because of it.
pw-bot: cr
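[Editorial note, not part of the thread: one shape that keeps the
special case out of every switch arm, along the lines of the objection
above, would be a single aliased-register check before the per-opcode
switch in is_scalar_branch_taken(). A hedged sketch, reusing t1, smin1
and smax1 from that function; this is not the actual v4:]

  if (reg1 == reg2) {
          switch (opcode) {
          case BPF_JEQ:
          case BPF_JGE:
          case BPF_JSGE:
          case BPF_JLE:
          case BPF_JSLE:
                  return 1;       /* r OP r is always true */
          case BPF_JNE:
          case BPF_JGT:
          case BPF_JSGT:
          case BPF_JLT:
          case BPF_JSLT:
                  return 0;       /* r OP r is always false */
          case BPF_JSET:
                  /* taken iff r != 0; unknown when r may be 0 */
                  if (tnum_is_const(t1))
                          return t1.value != 0;
                  return (smin1 <= 0 && smax1 >= 0) ? -1 : 1;
          }
  }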
On Fri, 2025-10-31 at 09:37 -0700, Alexei Starovoitov wrote:
> On Fri, Oct 31, 2025 at 8:44 AM KaFai Wan <kafai.wan@linux.dev> wrote:
> > [...]
> >
> > case BPF_JSGT:
> > + if (reg1 == reg2)
> > + return 0;
>
> This is uglier than the previous version.
> reg1 == reg2 is a syzbot territory.
> We shouldn't uglify the code everywhere because of it.
>
ok, will update in v4.
> pw-bot: cr
--
Thanks,
KaFai