Skip to content

Commit ded969a

Browse files
anakryiko authored and intel-lab-lkp committed
bpf: drop knowledge-losing __reg_combine_{32,64}_into_{64,32} logic
When performing a 32-bit conditional operation on the lower 32 bits of a full 64-bit register, the register's full value isn't changed. We just potentially gain new knowledge about that register's lower 32 bits. Unfortunately, the __reg_combine_{32,64}_into_{64,32} logic that reg_set_min_max() performs as a last step can lose information in some cases due to __mark_reg64_unbounded() and __reg_assign_32_into_64(). That's bad and completely unnecessary. Especially __reg_assign_32_into_64() looks completely out of place here, because we are not performing a zero-extending subregister assignment during a conditional jump. So this patch replaces __reg_combine_* with just a normal reg_bounds_sync() which will do a proper job of deriving u64/s64 bounds from u32/s32, and vice versa (among all other combinations). __reg_combine_64_into_32() is also used in one more place, coerce_reg_to_size(), while handling 1- and 2-byte register loads. Looking into this, it seems like besides marking the subregister as unbounded before performing reg_bounds_sync(), we were also performing deduction of smin32/smax32 and umin32/umax32 bounds from the respective smin/smax and umin/umax bounds. It's now redundant as reg_bounds_sync() performs all the same logic more generically (e.g., without the unnecessary assumption that the upper 32 bits of the full register should be zero). Long story short, we remove __reg_combine_64_into_32() completely, and coerce_reg_to_size() now only does resetting the subreg to unbounded and then performing reg_bounds_sync() to recover as much information as possible from the 64-bit umin/umax and smin/smax bounds, set explicitly in coerce_reg_to_size() earlier. Signed-off-by: Andrii Nakryiko <[email protected]>
1 parent 0960044 commit ded969a

File tree

1 file changed

+8
-52
lines changed

1 file changed

+8
-52
lines changed

kernel/bpf/verifier.c

Lines changed: 8 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -2348,51 +2348,6 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
23482348
}
23492349
}
23502350

2351-
static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
2352-
{
2353-
/* special case when 64-bit register has upper 32-bit register
2354-
* zeroed. Typically happens after zext or <<32, >>32 sequence
2355-
* allowing us to use 32-bit bounds directly,
2356-
*/
2357-
if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
2358-
__reg_assign_32_into_64(reg);
2359-
} else {
2360-
/* Otherwise the best we can do is push lower 32bit known and
2361-
* unknown bits into register (var_off set from jmp logic)
2362-
* then learn as much as possible from the 64-bit tnum
2363-
* known and unknown bits. The previous smin/smax bounds are
2364-
* invalid here because of jmp32 compare so mark them unknown
2365-
* so they do not impact tnum bounds calculation.
2366-
*/
2367-
__mark_reg64_unbounded(reg);
2368-
}
2369-
reg_bounds_sync(reg);
2370-
}
2371-
2372-
/* True iff the signed 64-bit value fits the signed 32-bit range. */
static bool __reg64_bound_s32(s64 a)
{
	return !(a < S32_MIN || a > S32_MAX);
}
2377-
/* True iff the unsigned 64-bit value fits the unsigned 32-bit range. */
static bool __reg64_bound_u32(u64 a)
{
	return !(a < U32_MIN || a > U32_MAX);
}
2382-
static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
2383-
{
2384-
__mark_reg32_unbounded(reg);
2385-
if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
2386-
reg->s32_min_value = (s32)reg->smin_value;
2387-
reg->s32_max_value = (s32)reg->smax_value;
2388-
}
2389-
if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
2390-
reg->u32_min_value = (u32)reg->umin_value;
2391-
reg->u32_max_value = (u32)reg->umax_value;
2392-
}
2393-
reg_bounds_sync(reg);
2394-
}
2395-
23962351
/* Mark a register as having a completely unknown (scalar) value. */
23972352
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
23982353
struct bpf_reg_state *reg)
@@ -6089,9 +6044,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
60896044
* values are also truncated so we push 64-bit bounds into
60906045
* 32-bit bounds. Above were truncated < 32-bits already.
60916046
*/
6092-
if (size >= 4)
6093-
return;
6094-
__reg_combine_64_into_32(reg);
6047+
if (size < 4) {
6048+
__mark_reg32_unbounded(reg);
6049+
reg_bounds_sync(reg);
6050+
}
60956051
}
60966052

60976053
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
@@ -14169,13 +14125,13 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
1416914125
tnum_subreg(false_32off));
1417014126
true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
1417114127
tnum_subreg(true_32off));
14172-
__reg_combine_32_into_64(false_reg);
14173-
__reg_combine_32_into_64(true_reg);
14128+
reg_bounds_sync(false_reg);
14129+
reg_bounds_sync(true_reg);
1417414130
} else {
1417514131
false_reg->var_off = false_64off;
1417614132
true_reg->var_off = true_64off;
14177-
__reg_combine_64_into_32(false_reg);
14178-
__reg_combine_64_into_32(true_reg);
14133+
reg_bounds_sync(false_reg);
14134+
reg_bounds_sync(true_reg);
1417914135
}
1418014136
}
1418114137

0 commit comments

Comments
 (0)