bpf: verifier: Allocate idmap scratch in verifier env
commit c9e73e3d2b1eb1ea7ff068e05007eec3bd8ef1c9 upstream.

func_states_equal makes a very short lived allocation for idmap,
probably because it's too large to fit on the stack. However the
function is called quite often, leading to a lot of alloc / free
churn. Replace the temporary allocation with dedicated scratch
space in struct bpf_verifier_env.

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Edward Cree <ecree.xilinx@gmail.com>
Link: https://lore.kernel.org/bpf/20210429134656.122225-4-lmb@cloudflare.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a11ca29c65
commit ffb9d5c48b
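The shape of the change is easiest to see outside the diff. Below is a minimal userspace sketch of the pattern, using stand-in names and sizes (env, id_pair, SCRATCH_ENTRIES and the states_equal_* helpers are illustrative, not the kernel's definitions): the old code allocates and frees a temporary id map on every state comparison, while the new code zeroes scratch storage embedded in the long-lived environment instead.

#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins; these names and sizes are illustrative,
 * not the kernel's definitions. */
#define SCRATCH_ENTRIES 75

struct id_pair { unsigned int old, cur; };

struct env {
	/* Scratch owned by the long-lived environment, zeroed before each use. */
	struct id_pair idmap_scratch[SCRATCH_ENTRIES];
};

/* Old pattern: allocate and free a temporary map on every comparison. */
static int states_equal_alloc(void)
{
	struct id_pair *idmap;
	int ret = 0;

	idmap = calloc(SCRATCH_ENTRIES, sizeof(*idmap));
	if (!idmap)
		return 0;	/* allocation failure treated as "not equal" */
	/* ... compare states, recording id mappings in idmap ... */
	ret = 1;
	free(idmap);
	return ret;
}

/* New pattern: reuse scratch space that lives as long as the environment,
 * so the hot path performs no allocation at all. */
static int states_equal_scratch(struct env *env)
{
	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
	/* ... compare states, recording id mappings in env->idmap_scratch ... */
	return 1;
}

int main(void)
{
	static struct env env;

	return !(states_equal_alloc() && states_equal_scratch(&env));
}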
include/linux/bpf_verifier.h

@@ -204,6 +204,13 @@ struct bpf_idx_pair {
 	u32 idx;
 };
 
+struct bpf_id_pair {
+	u32 old;
+	u32 cur;
+};
+
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
 #define MAX_CALL_FRAMES 8
 struct bpf_verifier_state {
 	/* call stack tracking */
@@ -401,6 +408,7 @@ struct bpf_verifier_env {
 	const struct bpf_line_info *prev_linfo;
 	struct bpf_verifier_log log;
 	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
 	struct {
 		int *insn_state;
 		int *insn_stack;
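For a rough sense of what the new array costs, assuming the usual kernel values MAX_BPF_REG = 11, MAX_BPF_STACK = 512 and BPF_REG_SIZE = 8 (quoted from memory, not taken from this tree), a small standalone check works out the footprint:

#include <stdio.h>

/* Assumed to match the kernel's usual values; stand-ins, not the real headers. */
#define MAX_BPF_REG   11
#define MAX_BPF_STACK 512
#define BPF_REG_SIZE  8
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)

struct bpf_id_pair { unsigned int old, cur; };

int main(void)
{
	/* 11 + 512 / 8 = 75 entries of 8 bytes each, roughly 600 bytes of
	 * scratch embedded in bpf_verifier_env for the whole verifier run. */
	printf("%d entries, %zu bytes\n", BPF_ID_MAP_SIZE,
	       BPF_ID_MAP_SIZE * sizeof(struct bpf_id_pair));
	return 0;
}

Roughly 600 bytes in a structure that already lives for the entire verification run, in exchange for dropping a kcalloc()/kfree() pair from every func_states_equal() call.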
kernel/bpf/verifier.c

@@ -8962,13 +8962,6 @@ static bool range_within(struct bpf_reg_state *old,
 		old->s32_max_value >= cur->s32_max_value;
 }
 
-/* Maximum number of register states that can exist at once */
-#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
-struct idpair {
-	u32 old;
-	u32 cur;
-};
-
 /* If in the old state two registers had the same id, then they need to have
  * the same id in the new state as well. But that id could be different from
  * the old state, so we need to track the mapping from old to new ids.
@@ -8979,11 +8972,11 @@ struct idpair {
  * So we look through our idmap to see if this old id has been seen before. If
  * so, we require the new id to match; otherwise, we add the id pair to the map.
  */
-static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
+static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
 {
 	unsigned int i;
 
-	for (i = 0; i < ID_MAP_SIZE; i++) {
+	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
 		if (!idmap[i].old) {
 			/* Reached an empty slot; haven't seen this id before */
 			idmap[i].old = old_id;
@@ -9096,7 +9089,7 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
 
 /* Returns true if (rold safe implies rcur safe) */
 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
-		    struct idpair *idmap)
+		    struct bpf_id_pair *idmap)
 {
 	bool equal;
 
@@ -9213,7 +9206,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 
 static bool stacksafe(struct bpf_func_state *old,
 		      struct bpf_func_state *cur,
-		      struct idpair *idmap)
+		      struct bpf_id_pair *idmap)
 {
 	int i, spi;
 
@@ -9310,32 +9303,23 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
  * whereas register type in current state is meaningful, it means that
  * the current state will reach 'bpf_exit' instruction safely
  */
-static bool func_states_equal(struct bpf_func_state *old,
+static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			      struct bpf_func_state *cur)
 {
-	struct idpair *idmap;
-	bool ret = false;
 	int i;
 
-	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
-	/* If we failed to allocate the idmap, just say it's not safe */
-	if (!idmap)
+	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
+	for (i = 0; i < MAX_BPF_REG; i++)
+		if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+			return false;
+
+	if (!stacksafe(old, cur, env->idmap_scratch))
 		return false;
 
-	for (i = 0; i < MAX_BPF_REG; i++) {
-		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
-			goto out_free;
-	}
-
-	if (!stacksafe(old, cur, idmap))
-		goto out_free;
-
 	if (!refsafe(old, cur))
-		goto out_free;
-	ret = true;
-out_free:
-	kfree(idmap);
-	return ret;
+		return false;
+
+	return true;
 }
 
 static bool states_equal(struct bpf_verifier_env *env,
@@ -9362,7 +9346,7 @@ static bool states_equal(struct bpf_verifier_env *env,
 	for (i = 0; i <= old->curframe; i++) {
 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
 			return false;
-		if (!func_states_equal(old->frame[i], cur->frame[i]))
+		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
 			return false;
 	}
 	return true;
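check_ids(), whose signature changes above, is the consumer of the scratch map. A simplified standalone rendering of its lookup-or-record logic follows; the constants and the main() harness are assumptions, while the loop mirrors the hunk above.

#include <stdbool.h>
#include <stdio.h>

/* Assumed kernel values; stand-ins for the real headers. */
#define MAX_BPF_REG   11
#define MAX_BPF_STACK 512
#define BPF_REG_SIZE  8
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)

struct bpf_id_pair { unsigned int old, cur; };

/* If old_id has been seen before, the current id must match the recorded
 * mapping; otherwise record the pair in the first empty slot. */
static bool check_ids(unsigned int old_id, unsigned int cur_id,
		      struct bpf_id_pair *idmap)
{
	unsigned int i;

	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* Reached an empty slot; haven't seen this id before */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	/* Out of slots; conservatively report a mismatch. */
	return false;
}

int main(void)
{
	struct bpf_id_pair idmap[BPF_ID_MAP_SIZE] = { 0 };

	printf("%d %d %d\n",
	       check_ids(1, 7, idmap),   /* first sighting: records 1 -> 7 */
	       check_ids(1, 7, idmap),   /* consistent with the mapping */
	       check_ids(1, 9, idmap));  /* conflicts, so not safe */
	return 0;
}

The patch leaves this logic untouched; only the storage backing idmap moves from a per-call allocation into bpf_verifier_env.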